2024-12-06 08:18:26,309 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-06 08:18:26,326 main DEBUG Took 0.014631 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 08:18:26,326 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 08:18:26,327 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 08:18:26,328 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 08:18:26,329 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,339 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 08:18:26,356 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,358 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,360 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,360 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,361 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,361 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,362 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,362 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,363 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,364 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,364 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,365 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 08:18:26,366 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,366 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,367 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,367 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,368 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,368 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,369 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,369 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,369 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,370 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:18:26,370 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,371 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 08:18:26,373 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:18:26,374 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 08:18:26,377 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 08:18:26,377 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 08:18:26,379 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 08:18:26,379 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 08:18:26,390 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 08:18:26,393 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 08:18:26,395 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 08:18:26,396 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 08:18:26,396 main DEBUG createAppenders(={Console}) 2024-12-06 08:18:26,397 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-06 08:18:26,398 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-06 08:18:26,398 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-06 08:18:26,399 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 08:18:26,399 main DEBUG OutputStream closed 2024-12-06 08:18:26,399 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 08:18:26,400 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 08:18:26,400 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-06 08:18:26,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 08:18:26,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 08:18:26,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 08:18:26,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 08:18:26,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 08:18:26,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 08:18:26,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 08:18:26,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 08:18:26,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 08:18:26,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 08:18:26,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 08:18:26,500 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 08:18:26,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 08:18:26,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 08:18:26,502 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 08:18:26,502 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 08:18:26,502 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 08:18:26,503 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 08:18:26,505 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 08:18:26,505 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-06 08:18:26,506 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 08:18:26,506 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-06T08:18:26,795 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492 2024-12-06 08:18:26,799 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 08:18:26,800 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-06T08:18:26,813 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-06T08:18:26,839 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:18:26,842 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b, deleteOnExit=true 2024-12-06T08:18:26,843 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:18:26,843 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/test.cache.data in system properties and HBase conf 2024-12-06T08:18:26,844 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:18:26,845 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:18:26,846 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:18:26,846 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:18:26,847 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:18:26,944 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T08:18:27,036 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T08:18:27,041 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:18:27,042 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:18:27,042 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:18:27,043 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:18:27,043 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:18:27,043 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:18:27,044 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:18:27,044 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:18:27,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:18:27,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:18:27,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:18:27,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:18:27,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:18:27,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:18:27,976 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T08:18:28,053 INFO [Time-limited test {}] log.Log(170): Logging initialized @2490ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T08:18:28,134 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:18:28,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:18:28,231 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:18:28,231 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:18:28,233 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:18:28,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:18:28,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:18:28,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:18:28,480 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/java.io.tmpdir/jetty-localhost-34447-hadoop-hdfs-3_4_1-tests_jar-_-any-15851767448613883979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:18:28,487 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:34447} 2024-12-06T08:18:28,488 INFO [Time-limited test {}] server.Server(415): Started @2926ms 2024-12-06T08:18:28,900 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:18:28,907 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:18:28,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:18:28,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:18:28,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:18:28,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:18:28,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:18:29,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10ba49e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/java.io.tmpdir/jetty-localhost-34407-hadoop-hdfs-3_4_1-tests_jar-_-any-11421833223222985359/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:18:29,029 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:34407} 2024-12-06T08:18:29,029 INFO [Time-limited test {}] server.Server(415): Started @3468ms 2024-12-06T08:18:29,086 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:18:29,575 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/dfs/data/data1/current/BP-131426492-172.17.0.2-1733473107734/current, will proceed with Du for space computation calculation, 2024-12-06T08:18:29,576 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/dfs/data/data2/current/BP-131426492-172.17.0.2-1733473107734/current, will proceed with Du for space computation calculation, 2024-12-06T08:18:29,648 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:18:29,716 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x483682dfcbad8e1e with lease ID 0x8197998018c63daa: Processing first storage report for DS-ad8931c2-01a4-4c7a-9f0a-20570fd0ac9c from datanode DatanodeRegistration(127.0.0.1:40801, datanodeUuid=1550839e-efe6-40a4-bd97-ee939e8dac26, infoPort=39297, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=1194603563;c=1733473107734) 2024-12-06T08:18:29,717 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x483682dfcbad8e1e with lease ID 0x8197998018c63daa: from storage DS-ad8931c2-01a4-4c7a-9f0a-20570fd0ac9c node DatanodeRegistration(127.0.0.1:40801, datanodeUuid=1550839e-efe6-40a4-bd97-ee939e8dac26, infoPort=39297, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=1194603563;c=1733473107734), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:18:29,717 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x483682dfcbad8e1e with lease ID 0x8197998018c63daa: Processing first storage report for DS-df1be192-7eab-4e9d-a04b-e0e2afa3c8a5 from datanode DatanodeRegistration(127.0.0.1:40801, datanodeUuid=1550839e-efe6-40a4-bd97-ee939e8dac26, infoPort=39297, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=1194603563;c=1733473107734) 2024-12-06T08:18:29,717 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x483682dfcbad8e1e with lease ID 0x8197998018c63daa: from storage DS-df1be192-7eab-4e9d-a04b-e0e2afa3c8a5 node DatanodeRegistration(127.0.0.1:40801, datanodeUuid=1550839e-efe6-40a4-bd97-ee939e8dac26, infoPort=39297, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=1194603563;c=1733473107734), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:18:29,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492 
2024-12-06T08:18:29,827 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/zookeeper_0, clientPort=65195, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:18:29,838 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=65195 2024-12-06T08:18:29,850 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:29,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:30,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:18:30,540 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 with version=8 2024-12-06T08:18:30,540 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/hbase-staging 2024-12-06T08:18:30,682 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T08:18:30,966 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:18:30,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:18:30,986 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:18:30,986 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:18:30,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:18:30,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:18:31,136 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:18:31,199 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T08:18:31,208 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T08:18:31,212 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:18:31,240 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 6525 (auto-detected) 2024-12-06T08:18:31,241 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T08:18:31,260 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42303 2024-12-06T08:18:31,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:31,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:31,282 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:42303 connecting to ZooKeeper ensemble=127.0.0.1:65195 2024-12-06T08:18:31,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:423030x0, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:18:31,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42303-0x100666840130000 connected 2024-12-06T08:18:31,354 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:18:31,357 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:18:31,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:18:31,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42303 2024-12-06T08:18:31,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42303 2024-12-06T08:18:31,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42303 2024-12-06T08:18:31,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42303 2024-12-06T08:18:31,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42303 
2024-12-06T08:18:31,380 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156, hbase.cluster.distributed=false 2024-12-06T08:18:31,443 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:18:31,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:18:31,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:18:31,444 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:18:31,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:18:31,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:18:31,446 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:18:31,448 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:18:31,449 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38041 2024-12-06T08:18:31,451 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:18:31,456 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:18:31,458 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:31,462 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:31,467 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:38041 connecting to ZooKeeper ensemble=127.0.0.1:65195 2024-12-06T08:18:31,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380410x0, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:18:31,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38041-0x100666840130001 connected 2024-12-06T08:18:31,472 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:18:31,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38041-0x100666840130001, 
quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:18:31,475 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:18:31,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38041 2024-12-06T08:18:31,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38041 2024-12-06T08:18:31,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38041 2024-12-06T08:18:31,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38041 2024-12-06T08:18:31,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38041 2024-12-06T08:18:31,479 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,42303,1733473110672 2024-12-06T08:18:31,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:18:31,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:18:31,488 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,42303,1733473110672 2024-12-06T08:18:31,496 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:42303 2024-12-06T08:18:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:18:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:18:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:31,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:31,511 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:18:31,512 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:18:31,512 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,42303,1733473110672 from backup master directory 2024-12-06T08:18:31,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,42303,1733473110672 2024-12-06T08:18:31,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:18:31,516 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:18:31,516 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,42303,1733473110672 2024-12-06T08:18:31,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:18:31,519 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T08:18:31,520 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T08:18:31,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:18:31,993 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/hbase.id with ID: 3cd4d453-1d4b-47b8-985a-b74ac30d34b0 2024-12-06T08:18:32,043 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:18:32,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:32,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:32,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:18:32,509 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:18:32,511 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:18:32,531 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:32,537 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:18:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:18:32,588 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store 2024-12-06T08:18:32,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:18:33,014 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-06T08:18:33,014 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:33,016 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:18:33,016 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:18:33,016 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:18:33,017 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:18:33,017 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:18:33,017 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:18:33,017 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:18:33,020 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/.initializing 2024-12-06T08:18:33,020 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/WALs/b6b797fc3981,42303,1733473110672 2024-12-06T08:18:33,028 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T08:18:33,040 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C42303%2C1733473110672, suffix=, logDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/WALs/b6b797fc3981,42303,1733473110672, archiveDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/oldWALs, maxLogs=10 2024-12-06T08:18:33,062 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/WALs/b6b797fc3981,42303,1733473110672/b6b797fc3981%2C42303%2C1733473110672.1733473113044, exclude list is [], retry=0 2024-12-06T08:18:33,078 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40801,DS-ad8931c2-01a4-4c7a-9f0a-20570fd0ac9c,DISK] 2024-12-06T08:18:33,081 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-06T08:18:33,117 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/WALs/b6b797fc3981,42303,1733473110672/b6b797fc3981%2C42303%2C1733473110672.1733473113044 2024-12-06T08:18:33,118 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39297:39297)] 2024-12-06T08:18:33,119 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:18:33,119 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:33,123 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,124 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:18:33,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:33,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:18:33,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:33,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:18:33,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:33,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:18:33,205 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:33,210 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,212 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,220 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:18:33,225 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:18:33,229 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:18:33,230 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71107461, jitterRate=0.05958373844623566}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:18:33,235 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:18:33,236 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:18:33,265 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2835dcfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:33,299 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-06T08:18:33,310 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:18:33,310 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:18:33,312 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T08:18:33,314 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T08:18:33,319 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-06T08:18:33,319 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:18:33,343 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T08:18:33,355 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:18:33,358 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:18:33,360 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:18:33,361 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:18:33,363 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:18:33,365 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:18:33,369 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:18:33,370 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:18:33,371 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:18:33,373 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:18:33,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42303-0x100666840130000, quorum=127.0.0.1:65195, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:18:33,384 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:18:33,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:18:33,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:18:33,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:33,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:33,388 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,42303,1733473110672, sessionid=0x100666840130000, setting cluster-up flag (Was=false) 2024-12-06T08:18:33,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:33,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:33,407 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:18:33,409 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,42303,1733473110672 2024-12-06T08:18:33,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:33,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:33,419 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:18:33,420 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,42303,1733473110672 2024-12-06T08:18:33,494 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:38041 2024-12-06T08:18:33,496 INFO 
[RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1008): ClusterId : 3cd4d453-1d4b-47b8-985a-b74ac30d34b0 2024-12-06T08:18:33,499 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:18:33,502 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:18:33,504 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:18:33,504 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:18:33,507 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:18:33,508 DEBUG [RS:0;b6b797fc3981:38041 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55f07030, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:33,508 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:18:33,509 DEBUG [RS:0;b6b797fc3981:38041 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39ad7983, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:18:33,511 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T08:18:33,512 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:18:33,512 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:18:33,512 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T08:18:33,514 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,42303,1733473110672 with isa=b6b797fc3981/172.17.0.2:38041, startcode=1733473111442 2024-12-06T08:18:33,517 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,42303,1733473110672 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:18:33,521 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:18:33,521 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:18:33,521 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:18:33,521 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:18:33,522 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:18:33,522 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,522 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:18:33,523 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,528 DEBUG [RS:0;b6b797fc3981:38041 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:18:33,529 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:18:33,530 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733473143529 2024-12-06T08:18:33,530 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:18:33,532 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:18:33,533 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:18:33,535 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,535 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:18:33,537 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:18:33,538 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:18:33,538 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:18:33,539 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:18:33,539 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-06T08:18:33,542 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:18:33,544 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:18:33,544 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:18:33,548 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:18:33,548 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:18:33,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:18:33,551 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733473113550,5,FailOnTimeoutGroup] 2024-12-06T08:18:33,552 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733473113552,5,FailOnTimeoutGroup] 2024-12-06T08:18:33,552 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:18:33,552 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-06T08:18:33,553 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:18:33,553 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:18:33,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:33,555 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-06T08:18:33,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:18:33,572 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33491, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:18:33,577 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42303 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,38041,1733473111442 2024-12-06T08:18:33,580 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42303 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:33,594 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:18:33,594 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:43731 2024-12-06T08:18:33,594 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:18:33,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:18:33,599 DEBUG [RS:0;b6b797fc3981:38041 {}] zookeeper.ZKUtil(111): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,38041,1733473111442 2024-12-06T08:18:33,599 WARN [RS:0;b6b797fc3981:38041 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:18:33,599 INFO [RS:0;b6b797fc3981:38041 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:18:33,600 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442 2024-12-06T08:18:33,601 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,38041,1733473111442] 2024-12-06T08:18:33,612 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:18:33,623 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:18:33,636 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:18:33,639 INFO [RS:0;b6b797fc3981:38041 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:18:33,639 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T08:18:33,640 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:18:33,647 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:33,647 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,647 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,648 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,648 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,648 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,648 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:18:33,648 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,649 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,649 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,649 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,649 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:18:33,650 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:18:33,650 DEBUG [RS:0;b6b797fc3981:38041 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:18:33,652 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:33,652 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:33,652 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-06T08:18:33,652 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:33,652 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,38041,1733473111442-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:18:33,674 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:18:33,676 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,38041,1733473111442-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:33,703 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.Replication(204): b6b797fc3981,38041,1733473111442 started 2024-12-06T08:18:33,703 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,38041,1733473111442, RpcServer on b6b797fc3981/172.17.0.2:38041, sessionid=0x100666840130001 2024-12-06T08:18:33,704 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:18:33,704 DEBUG [RS:0;b6b797fc3981:38041 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,38041,1733473111442 2024-12-06T08:18:33,704 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,38041,1733473111442' 2024-12-06T08:18:33,704 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:18:33,705 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:18:33,706 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:18:33,706 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:18:33,706 DEBUG [RS:0;b6b797fc3981:38041 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,38041,1733473111442 2024-12-06T08:18:33,706 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,38041,1733473111442' 2024-12-06T08:18:33,706 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:18:33,707 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:18:33,708 DEBUG [RS:0;b6b797fc3981:38041 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:18:33,708 INFO [RS:0;b6b797fc3981:38041 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:18:33,708 INFO [RS:0;b6b797fc3981:38041 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-06T08:18:33,814 INFO [RS:0;b6b797fc3981:38041 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T08:18:33,818 INFO [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C38041%2C1733473111442, suffix=, logDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442, archiveDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/oldWALs, maxLogs=32 2024-12-06T08:18:33,835 DEBUG [RS:0;b6b797fc3981:38041 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442/b6b797fc3981%2C38041%2C1733473111442.1733473113820, exclude list is [], retry=0 2024-12-06T08:18:33,840 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40801,DS-ad8931c2-01a4-4c7a-9f0a-20570fd0ac9c,DISK] 2024-12-06T08:18:33,844 INFO [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442/b6b797fc3981%2C38041%2C1733473111442.1733473113820 2024-12-06T08:18:33,844 DEBUG [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39297:39297)] 2024-12-06T08:18:33,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:33,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:18:33,977 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:18:33,977 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:33,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:18:33,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:18:33,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:33,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:18:33,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:18:33,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:33,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:33,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740 2024-12-06T08:18:33,987 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740 2024-12-06T08:18:33,990 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:18:33,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:18:33,996 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:18:33,997 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63273627, jitterRate=-0.057149484753608704}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:18:33,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:18:33,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:18:34,000 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:18:34,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:18:34,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:18:34,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:18:34,001 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:18:34,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:18:34,004 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:18:34,004 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:18:34,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:18:34,018 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:18:34,020 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:18:34,172 DEBUG [b6b797fc3981:42303 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:18:34,176 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:34,181 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,38041,1733473111442, state=OPENING 2024-12-06T08:18:34,186 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:18:34,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:34,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:34,189 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:18:34,189 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:18:34,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:18:34,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:34,367 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:18:34,371 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:18:34,381 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:18:34,381 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:18:34,382 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-06T08:18:34,384 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C38041%2C1733473111442.meta, suffix=.meta, logDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442, archiveDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/oldWALs, maxLogs=32 2024-12-06T08:18:34,401 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442/b6b797fc3981%2C38041%2C1733473111442.meta.1733473114386.meta, exclude list is [], retry=0 2024-12-06T08:18:34,405 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40801,DS-ad8931c2-01a4-4c7a-9f0a-20570fd0ac9c,DISK] 2024-12-06T08:18:34,409 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/WALs/b6b797fc3981,38041,1733473111442/b6b797fc3981%2C38041%2C1733473111442.meta.1733473114386.meta 2024-12-06T08:18:34,409 DEBUG 
[RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39297:39297)] 2024-12-06T08:18:34,410 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:18:34,411 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:18:34,472 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:18:34,477 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T08:18:34,482 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:18:34,482 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:34,482 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:18:34,482 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:18:34,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:18:34,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:18:34,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:34,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:34,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:18:34,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:18:34,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:34,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:34,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:18:34,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:18:34,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:34,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:18:34,495 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740 2024-12-06T08:18:34,497 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740 2024-12-06T08:18:34,500 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:18:34,502 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:18:34,504 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68995672, jitterRate=0.02811563014984131}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:18:34,505 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:18:34,513 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733473114359 2024-12-06T08:18:34,524 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:18:34,524 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:18:34,525 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:34,527 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,38041,1733473111442, state=OPEN 2024-12-06T08:18:34,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:18:34,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:18:34,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:18:34,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:18:34,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:18:34,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,38041,1733473111442 in 341 msec 2024-12-06T08:18:34,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:18:34,543 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 528 msec 2024-12-06T08:18:34,548 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0930 sec 2024-12-06T08:18:34,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733473114548, completionTime=-1 2024-12-06T08:18:34,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:18:34,549 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:18:34,590 DEBUG [hconnection-0x5602a74-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:34,593 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:34,603 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:18:34,603 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733473174603 2024-12-06T08:18:34,603 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733473234603 2024-12-06T08:18:34,603 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-12-06T08:18:34,624 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42303,1733473110672-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:34,625 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42303,1733473110672-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:34,625 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42303,1733473110672-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:34,627 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:42303, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:34,627 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:18:34,632 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:18:34,635 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T08:18:34,636 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:18:34,642 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:18:34,645 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:18:34,647 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:34,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:18:34,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:18:35,063 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ad6c9f95f0b663a6596ce60f0a457f00, NAME => 'hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:18:35,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:18:35,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:35,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing ad6c9f95f0b663a6596ce60f0a457f00, disabling compactions & flushes 2024-12-06T08:18:35,073 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:18:35,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:18:35,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 
after waiting 0 ms 2024-12-06T08:18:35,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:18:35,074 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:18:35,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for ad6c9f95f0b663a6596ce60f0a457f00: 2024-12-06T08:18:35,076 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:18:35,082 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733473115077"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473115077"}]},"ts":"1733473115077"} 2024-12-06T08:18:35,106 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:18:35,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:18:35,111 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473115108"}]},"ts":"1733473115108"} 2024-12-06T08:18:35,115 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:18:35,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ad6c9f95f0b663a6596ce60f0a457f00, ASSIGN}] 2024-12-06T08:18:35,124 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ad6c9f95f0b663a6596ce60f0a457f00, ASSIGN 2024-12-06T08:18:35,125 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=ad6c9f95f0b663a6596ce60f0a457f00, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:18:35,276 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ad6c9f95f0b663a6596ce60f0a457f00, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:35,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure ad6c9f95f0b663a6596ce60f0a457f00, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:18:35,434 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:35,441 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:18:35,441 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => ad6c9f95f0b663a6596ce60f0a457f00, NAME => 'hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:18:35,441 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,442 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:35,442 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,442 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,444 INFO [StoreOpener-ad6c9f95f0b663a6596ce60f0a457f00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,447 INFO [StoreOpener-ad6c9f95f0b663a6596ce60f0a457f00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad6c9f95f0b663a6596ce60f0a457f00 columnFamilyName info 2024-12-06T08:18:35,447 DEBUG [StoreOpener-ad6c9f95f0b663a6596ce60f0a457f00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:35,448 INFO [StoreOpener-ad6c9f95f0b663a6596ce60f0a457f00-1 {}] regionserver.HStore(327): Store=ad6c9f95f0b663a6596ce60f0a457f00/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:35,449 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,450 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,454 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:18:35,457 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:18:35,458 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened ad6c9f95f0b663a6596ce60f0a457f00; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66411069, jitterRate=-0.010397955775260925}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:18:35,460 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for ad6c9f95f0b663a6596ce60f0a457f00: 2024-12-06T08:18:35,462 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00., pid=6, masterSystemTime=1733473115434 2024-12-06T08:18:35,466 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:18:35,466 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 
2024-12-06T08:18:35,467 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ad6c9f95f0b663a6596ce60f0a457f00, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:35,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:18:35,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure ad6c9f95f0b663a6596ce60f0a457f00, server=b6b797fc3981,38041,1733473111442 in 191 msec 2024-12-06T08:18:35,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:18:35,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=ad6c9f95f0b663a6596ce60f0a457f00, ASSIGN in 354 msec 2024-12-06T08:18:35,480 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:18:35,480 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473115480"}]},"ts":"1733473115480"} 2024-12-06T08:18:35,483 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:18:35,486 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:18:35,489 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 849 msec 2024-12-06T08:18:35,546 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:18:35,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:35,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:18:35,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:18:35,576 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:18:35,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:18:35,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 22 msec 2024-12-06T08:18:35,600 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:18:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:18:35,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 14 msec 2024-12-06T08:18:35,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:18:35,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:18:35,629 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.112sec 2024-12-06T08:18:35,630 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:18:35,632 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:18:35,632 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:18:35,633 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:18:35,633 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:18:35,634 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42303,1733473110672-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:18:35,634 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42303,1733473110672-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:18:35,641 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:18:35,642 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:18:35,642 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42303,1733473110672-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:18:35,698 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3771e354 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38630296 2024-12-06T08:18:35,699 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-06T08:18:35,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6321da62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:35,710 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T08:18:35,710 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T08:18:35,719 DEBUG [hconnection-0x13adb0ff-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:35,729 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:35,739 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,42303,1733473110672 2024-12-06T08:18:35,756 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=152, ProcessCount=11, AvailableMemoryMB=8104 2024-12-06T08:18:35,766 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:18:35,769 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41092, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:18:35,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-06T08:18:35,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:18:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T08:18:35,805 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:18:35,806 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:35,808 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:18:35,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-06T08:18:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:18:35,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741837_1013 (size=963) 2024-12-06T08:18:35,841 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:18:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741838_1014 (size=53) 2024-12-06T08:18:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:18:36,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:18:36,256 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:36,257 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 27b39638a4e82980fa51c6694c44d0ad, disabling compactions & flushes 2024-12-06T08:18:36,257 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:36,257 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:36,257 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. after waiting 0 ms 2024-12-06T08:18:36,257 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:36,257 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:36,257 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:36,259 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:18:36,259 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733473116259"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473116259"}]},"ts":"1733473116259"} 2024-12-06T08:18:36,262 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T08:18:36,264 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:18:36,264 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473116264"}]},"ts":"1733473116264"} 2024-12-06T08:18:36,266 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T08:18:36,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, ASSIGN}] 2024-12-06T08:18:36,272 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, ASSIGN 2024-12-06T08:18:36,273 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:18:36,424 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=27b39638a4e82980fa51c6694c44d0ad, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:18:36,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:18:36,581 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:36,587 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:36,587 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:18:36,588 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,588 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:18:36,588 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,588 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,591 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,594 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:18:36,594 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27b39638a4e82980fa51c6694c44d0ad columnFamilyName A 2024-12-06T08:18:36,594 DEBUG [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:36,595 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.HStore(327): Store=27b39638a4e82980fa51c6694c44d0ad/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:36,595 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,597 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:18:36,598 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27b39638a4e82980fa51c6694c44d0ad columnFamilyName B 2024-12-06T08:18:36,598 DEBUG [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:36,599 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.HStore(327): Store=27b39638a4e82980fa51c6694c44d0ad/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:36,599 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,601 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:18:36,601 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27b39638a4e82980fa51c6694c44d0ad columnFamilyName C 2024-12-06T08:18:36,602 DEBUG [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:18:36,602 INFO [StoreOpener-27b39638a4e82980fa51c6694c44d0ad-1 {}] regionserver.HStore(327): Store=27b39638a4e82980fa51c6694c44d0ad/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:18:36,603 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:36,604 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,604 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,607 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:18:36,609 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:36,612 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:18:36,613 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 27b39638a4e82980fa51c6694c44d0ad; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71637977, jitterRate=0.06748904287815094}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:18:36,615 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:36,616 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., pid=11, masterSystemTime=1733473116581 2024-12-06T08:18:36,619 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:36,619 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:36,620 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=27b39638a4e82980fa51c6694c44d0ad, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:36,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T08:18:36,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 in 195 msec 2024-12-06T08:18:36,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T08:18:36,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, ASSIGN in 356 msec 2024-12-06T08:18:36,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:18:36,631 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473116631"}]},"ts":"1733473116631"} 2024-12-06T08:18:36,633 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T08:18:36,638 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:18:36,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 837 msec 2024-12-06T08:18:36,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:18:36,929 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-06T08:18:36,933 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63607639 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e67f019 2024-12-06T08:18:36,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fcb5f29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,940 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:36,942 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:36,945 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:18:36,947 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41098, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:18:36,953 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53b8a93e to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5095ba91 2024-12-06T08:18:36,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f2091cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,959 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-12-06T08:18:36,962 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22cb07dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,964 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1324ee83 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62c43377 2024-12-06T08:18:36,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,968 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-12-06T08:18:36,971 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5400112e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,973 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x048068a5 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a8f4734 2024-12-06T08:18:36,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b5f27aa to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10c964e8 2024-12-06T08:18:36,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ed28bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,983 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-12-06T08:18:36,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,989 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-12-06T08:18:36,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:36,993 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-12-06T08:18:36,996 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:18:37,002 DEBUG [hconnection-0x45f0f0a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,003 DEBUG [hconnection-0x4b613b0d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,004 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41044, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,005 DEBUG [hconnection-0x5dc12106-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,006 DEBUG [hconnection-0x778c0741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,008 DEBUG [hconnection-0x4fd12293-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,009 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:37,012 DEBUG [hconnection-0x3b20eaa4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, 
sasl=false 2024-12-06T08:18:37,013 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,015 DEBUG [hconnection-0x5f7a19bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,017 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,017 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-06T08:18:37,020 DEBUG [hconnection-0x6313e8f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,020 DEBUG [hconnection-0x176fe140-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:18:37,021 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:37,023 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,024 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,027 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,027 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:37,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:37,031 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:18:37,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:18:37,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:37,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:18:37,123 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:37,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:37,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:37,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:37,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:37,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:18:37,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:37,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:37,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:37,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/92a40c9b3e584983a64d78e2a0549523 is 50, key is test_row_0/A:col10/1733473117108/Put/seqid=0 2024-12-06T08:18:37,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741839_1015 (size=12001) 2024-12-06T08:18:37,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473177285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473177285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473177285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473177291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473177291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:18:37,428 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:37,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473177436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473177437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473177439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473177442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:37,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:37,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473177439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:37,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:37,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:18:37,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473177654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473177655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473177656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473177656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473177658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/92a40c9b3e584983a64d78e2a0549523 2024-12-06T08:18:37,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:37,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:37,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:37,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:37,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:37,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/0927a4e4d7d94c53a3ccf70a16a3e7a0 is 50, key is test_row_0/B:col10/1733473117108/Put/seqid=0
2024-12-06T08:18:37,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741840_1016 (size=12001)
2024-12-06T08:18:37,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/0927a4e4d7d94c53a3ccf70a16a3e7a0
2024-12-06T08:18:37,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7a6480539522489faf7d6b481b3fefb7 is 50, key is test_row_0/C:col10/1733473117108/Put/seqid=0
2024-12-06T08:18:37,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741841_1017 (size=12001)
2024-12-06T08:18:37,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7a6480539522489faf7d6b481b3fefb7
2024-12-06T08:18:37,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442
2024-12-06T08:18:37,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/92a40c9b3e584983a64d78e2a0549523 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/92a40c9b3e584983a64d78e2a0549523
2024-12-06T08:18:37,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-06T08:18:37,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:37,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing
2024-12-06T08:18:37,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:37,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:37,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
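The RegionTooBusyException entries above and below are memstore back-pressure: HRegion.checkResources rejects a mutation once the region's in-memory data exceeds its blocking limit, reported here as 512.0 K, and the client is pushed back until the in-progress flush completes. That blocking limit is normally the configured memstore flush size multiplied by the block multiplier; the short Java sketch below only illustrates that arithmetic with assumed values, since this log does not state the test's actual settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed example values: a 128 KB flush size with a block multiplier of 4
        // works out to the 512.0 K blocking limit reported in the exceptions above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 bytes = 512.0 K
    }
}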
2024-12-06T08:18:37,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/92a40c9b3e584983a64d78e2a0549523, entries=150, sequenceid=13, filesize=11.7 K
2024-12-06T08:18:37,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/0927a4e4d7d94c53a3ccf70a16a3e7a0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/0927a4e4d7d94c53a3ccf70a16a3e7a0
2024-12-06T08:18:37,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/0927a4e4d7d94c53a3ccf70a16a3e7a0, entries=150, sequenceid=13, filesize=11.7 K
2024-12-06T08:18:37,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7a6480539522489faf7d6b481b3fefb7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7
2024-12-06T08:18:37,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473177963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:37,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7, entries=150, sequenceid=13, filesize=11.7 K
2024-12-06T08:18:37,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 27b39638a4e82980fa51c6694c44d0ad in 878ms, sequenceid=13, compaction requested=false
2024-12-06T08:18:37,989 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-12-06T08:18:37,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad:
2024-12-06T08:18:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad
2024-12-06T08:18:37,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-06T08:18:37,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A
2024-12-06T08:18:37,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:18:37,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B
2024-12-06T08:18:37,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:18:37,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C
2024-12-06T08:18:37,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:18:38,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/b5b1735e7b314978947e059aec313068 is 50, key is test_row_0/A:col10/1733473117987/Put/seqid=0
2024-12-06T08:18:38,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473178030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473178035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473178039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473178040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741842_1018 (size=14341)
2024-12-06T08:18:38,082 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-06T08:18:38,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:38,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing
2024-12-06T08:18:38,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:38,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473178139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-06T08:18:38,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473178143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473178154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473178152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,237 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-06T08:18:38,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:38,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing
2024-12-06T08:18:38,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:38,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473178353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473178362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473178364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:18:38,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473178365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,395 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-06T08:18:38,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:38,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing
2024-12-06T08:18:38,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.
2024-12-06T08:18:38,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:18:38,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/b5b1735e7b314978947e059aec313068
2024-12-06T08:18:38,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473178492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/81cef35dd28f48c1a15f8e3295301fb7 is 50, key is test_row_0/B:col10/1733473117987/Put/seqid=0 2024-12-06T08:18:38,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741843_1019 (size=12001) 2024-12-06T08:18:38,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/81cef35dd28f48c1a15f8e3295301fb7 2024-12-06T08:18:38,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/fe44fb8e0bf04591ba61f81fe6b69a1c is 50, key is test_row_0/C:col10/1733473117987/Put/seqid=0 2024-12-06T08:18:38,551 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:38,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:38,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:38,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:38,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:38,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:38,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:38,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741844_1020 (size=12001) 2024-12-06T08:18:38,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/fe44fb8e0bf04591ba61f81fe6b69a1c 2024-12-06T08:18:38,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/b5b1735e7b314978947e059aec313068 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/b5b1735e7b314978947e059aec313068 2024-12-06T08:18:38,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/b5b1735e7b314978947e059aec313068, entries=200, sequenceid=38, filesize=14.0 K 2024-12-06T08:18:38,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/81cef35dd28f48c1a15f8e3295301fb7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/81cef35dd28f48c1a15f8e3295301fb7 2024-12-06T08:18:38,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/81cef35dd28f48c1a15f8e3295301fb7, entries=150, sequenceid=38, filesize=11.7 K
2024-12-06T08:18:38,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/fe44fb8e0bf04591ba61f81fe6b69a1c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/fe44fb8e0bf04591ba61f81fe6b69a1c
2024-12-06T08:18:38,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/fe44fb8e0bf04591ba61f81fe6b69a1c, entries=150, sequenceid=38, filesize=11.7 K
2024-12-06T08:18:38,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 27b39638a4e82980fa51c6694c44d0ad in 656ms, sequenceid=38, compaction requested=false
2024-12-06T08:18:38,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad:
2024-12-06T08:18:38,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-06T08:18:38,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A
2024-12-06T08:18:38,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:18:38,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B
2024-12-06T08:18:38,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:18:38,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C
2024-12-06T08:18:38,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:18:38,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad
2024-12-06T08:18:38,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/577eb67e69aa4d1795745a5141183895 is 50, key is test_row_0/A:col10/1733473118020/Put/seqid=0
2024-12-06T08:18:38,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442
2024-12-06T08:18:38,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-06T08:18:38,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}]
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:38,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:38,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:38,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:38,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:38,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:38,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741845_1021 (size=14341) 2024-12-06T08:18:38,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473178733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473178735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473178744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473178747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473178850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473178858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473178860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473178861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:38,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:38,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:38,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:38,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:38,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:38,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,018 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:39,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:39,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473179054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473179062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473179067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473179068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/577eb67e69aa4d1795745a5141183895 2024-12-06T08:18:39,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c0212401db2044c1b29ec99531172de7 is 50, key is test_row_0/B:col10/1733473118020/Put/seqid=0 2024-12-06T08:18:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:18:39,174 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:39,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:39,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:39,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741846_1022 (size=12001) 2024-12-06T08:18:39,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c0212401db2044c1b29ec99531172de7 2024-12-06T08:18:39,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3 is 50, key is test_row_0/C:col10/1733473118020/Put/seqid=0 2024-12-06T08:18:39,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741847_1023 (size=12001) 2024-12-06T08:18:39,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3 2024-12-06T08:18:39,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/577eb67e69aa4d1795745a5141183895 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/577eb67e69aa4d1795745a5141183895 2024-12-06T08:18:39,286 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/577eb67e69aa4d1795745a5141183895, entries=200, sequenceid=52, filesize=14.0 K 2024-12-06T08:18:39,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c0212401db2044c1b29ec99531172de7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c0212401db2044c1b29ec99531172de7 2024-12-06T08:18:39,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c0212401db2044c1b29ec99531172de7, entries=150, sequenceid=52, filesize=11.7 K 2024-12-06T08:18:39,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3 2024-12-06T08:18:39,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3, entries=150, sequenceid=52, filesize=11.7 K 2024-12-06T08:18:39,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 27b39638a4e82980fa51c6694c44d0ad in 651ms, sequenceid=52, compaction requested=true 2024-12-06T08:18:39,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:39,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:39,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:39,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:39,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:39,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:39,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:39,328 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:39,328 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:39,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:18:39,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,332 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T08:18:39,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:39,333 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:39,335 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:39,335 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:39,336 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/0927a4e4d7d94c53a3ccf70a16a3e7a0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/81cef35dd28f48c1a15f8e3295301fb7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c0212401db2044c1b29ec99531172de7] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.2 K 2024-12-06T08:18:39,337 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 0927a4e4d7d94c53a3ccf70a16a3e7a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733473117055 2024-12-06T08:18:39,338 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 81cef35dd28f48c1a15f8e3295301fb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733473117240 2024-12-06T08:18:39,339 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c0212401db2044c1b29ec99531172de7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733473118020 2024-12-06T08:18:39,339 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:39,341 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:39,341 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:39,341 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/92a40c9b3e584983a64d78e2a0549523, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/b5b1735e7b314978947e059aec313068, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/577eb67e69aa4d1795745a5141183895] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=39.7 K 2024-12-06T08:18:39,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/bd77b7f27cae417cb6e3bbe0766fd31f is 50, key is test_row_0/A:col10/1733473118726/Put/seqid=0 2024-12-06T08:18:39,342 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92a40c9b3e584983a64d78e2a0549523, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733473117055 2024-12-06T08:18:39,344 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5b1735e7b314978947e059aec313068, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733473117240 2024-12-06T08:18:39,345 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 577eb67e69aa4d1795745a5141183895, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733473118020 2024-12-06T08:18:39,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741848_1024 (size=12001) 2024-12-06T08:18:39,364 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/bd77b7f27cae417cb6e3bbe0766fd31f 2024-12-06T08:18:39,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:39,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:39,386 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#10 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:39,387 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da is 50, key is test_row_0/B:col10/1733473118020/Put/seqid=0 2024-12-06T08:18:39,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c611b7ef7c354a7f9b873ac597bbbfde is 50, key is test_row_0/B:col10/1733473118726/Put/seqid=0 2024-12-06T08:18:39,393 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#12 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:39,394 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/56bbd8bb5fee4cdba0f7cdf26fa562b5 is 50, key is test_row_0/A:col10/1733473118020/Put/seqid=0 2024-12-06T08:18:39,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473179419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473179425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473179422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741850_1026 (size=12001) 2024-12-06T08:18:39,443 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c611b7ef7c354a7f9b873ac597bbbfde 2024-12-06T08:18:39,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473179437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741849_1025 (size=12104) 2024-12-06T08:18:39,471 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da 2024-12-06T08:18:39,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741851_1027 (size=12104) 2024-12-06T08:18:39,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/2cb748387ad64a6985f305cf387bff1a is 50, key is test_row_0/C:col10/1733473118726/Put/seqid=0 2024-12-06T08:18:39,498 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/56bbd8bb5fee4cdba0f7cdf26fa562b5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/56bbd8bb5fee4cdba0f7cdf26fa562b5 2024-12-06T08:18:39,498 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into f6bbcffa144b4d1fb3b6cb2a21e9f2da(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:39,499 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:39,499 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473119328; duration=0sec 2024-12-06T08:18:39,499 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:39,499 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:39,499 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:39,505 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:39,506 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:39,506 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,507 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/fe44fb8e0bf04591ba61f81fe6b69a1c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.2 K 2024-12-06T08:18:39,508 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a6480539522489faf7d6b481b3fefb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733473117055 2024-12-06T08:18:39,510 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fe44fb8e0bf04591ba61f81fe6b69a1c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733473117240 2024-12-06T08:18:39,511 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b3d0cca8dc8c4cafbc3d22e34bbb4ce3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733473118020 2024-12-06T08:18:39,513 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) 
file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 56bbd8bb5fee4cdba0f7cdf26fa562b5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:39,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,513 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:39,513 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473119323; duration=0sec 2024-12-06T08:18:39,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473179510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,514 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:39,514 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:39,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741852_1028 (size=12001) 2024-12-06T08:18:39,533 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/2cb748387ad64a6985f305cf387bff1a 2024-12-06T08:18:39,547 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#14 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:39,549 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/e19ee0e0c78b4f57beb73ddb9f6e69a0 is 50, key is test_row_0/C:col10/1733473118020/Put/seqid=0 2024-12-06T08:18:39,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473179541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473179541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473179541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/bd77b7f27cae417cb6e3bbe0766fd31f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bd77b7f27cae417cb6e3bbe0766fd31f 2024-12-06T08:18:39,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473179546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,570 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bd77b7f27cae417cb6e3bbe0766fd31f, entries=150, sequenceid=74, filesize=11.7 K 2024-12-06T08:18:39,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c611b7ef7c354a7f9b873ac597bbbfde as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c611b7ef7c354a7f9b873ac597bbbfde 2024-12-06T08:18:39,583 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c611b7ef7c354a7f9b873ac597bbbfde, entries=150, sequenceid=74, filesize=11.7 K 2024-12-06T08:18:39,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/2cb748387ad64a6985f305cf387bff1a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2cb748387ad64a6985f305cf387bff1a 2024-12-06T08:18:39,597 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2cb748387ad64a6985f305cf387bff1a, entries=150, sequenceid=74, filesize=11.7 K 2024-12-06T08:18:39,599 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 27b39638a4e82980fa51c6694c44d0ad in 266ms, sequenceid=74, compaction requested=false 2024-12-06T08:18:39,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:39,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:39,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-06T08:18:39,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-06T08:18:39,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741853_1029 (size=12104) 2024-12-06T08:18:39,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-06T08:18:39,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5720 sec 2024-12-06T08:18:39,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.5910 sec 2024-12-06T08:18:39,621 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/e19ee0e0c78b4f57beb73ddb9f6e69a0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/e19ee0e0c78b4f57beb73ddb9f6e69a0 2024-12-06T08:18:39,634 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into e19ee0e0c78b4f57beb73ddb9f6e69a0(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:39,635 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:39,635 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473119328; duration=0sec 2024-12-06T08:18:39,635 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:39,635 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:39,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:39,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:18:39,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:39,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:39,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:39,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:39,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:39,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:39,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c48eb88fb3974965979ffad62b3233f7 is 50, key is test_row_0/A:col10/1733473119433/Put/seqid=0 2024-12-06T08:18:39,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741854_1030 (size=12001) 2024-12-06T08:18:39,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c48eb88fb3974965979ffad62b3233f7 2024-12-06T08:18:39,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/d0feade4188a4537bec767349718f8c0 is 50, key is test_row_0/B:col10/1733473119433/Put/seqid=0 2024-12-06T08:18:39,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473179832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741855_1031 (size=12001) 2024-12-06T08:18:39,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473179838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473179837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473179842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473179945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473179954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473179955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473179954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:39,996 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T08:18:40,082 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T08:18:40,084 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T08:18:40,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473180151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473180158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473180159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473180160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/d0feade4188a4537bec767349718f8c0 2024-12-06T08:18:40,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/68892efa49294e9d88e5fc634d4b330f is 50, key is test_row_0/C:col10/1733473119433/Put/seqid=0 2024-12-06T08:18:40,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741856_1032 (size=12001) 2024-12-06T08:18:40,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/68892efa49294e9d88e5fc634d4b330f 2024-12-06T08:18:40,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c48eb88fb3974965979ffad62b3233f7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c48eb88fb3974965979ffad62b3233f7 2024-12-06T08:18:40,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c48eb88fb3974965979ffad62b3233f7, entries=150, sequenceid=93, filesize=11.7 K 2024-12-06T08:18:40,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/d0feade4188a4537bec767349718f8c0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d0feade4188a4537bec767349718f8c0 2024-12-06T08:18:40,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d0feade4188a4537bec767349718f8c0, entries=150, sequenceid=93, filesize=11.7 K 2024-12-06T08:18:40,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/68892efa49294e9d88e5fc634d4b330f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/68892efa49294e9d88e5fc634d4b330f 2024-12-06T08:18:40,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/68892efa49294e9d88e5fc634d4b330f, entries=150, sequenceid=93, filesize=11.7 K 2024-12-06T08:18:40,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 27b39638a4e82980fa51c6694c44d0ad in 603ms, sequenceid=93, compaction requested=true 2024-12-06T08:18:40,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:40,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:40,367 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:40,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:40,368 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:40,370 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:40,370 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:40,370 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:40,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:40,371 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/56bbd8bb5fee4cdba0f7cdf26fa562b5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bd77b7f27cae417cb6e3bbe0766fd31f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c48eb88fb3974965979ffad62b3233f7] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.3 K 2024-12-06T08:18:40,371 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:40,371 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:40,371 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:40,372 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c611b7ef7c354a7f9b873ac597bbbfde, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d0feade4188a4537bec767349718f8c0] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.3 K 2024-12-06T08:18:40,372 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56bbd8bb5fee4cdba0f7cdf26fa562b5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733473118020 2024-12-06T08:18:40,373 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f6bbcffa144b4d1fb3b6cb2a21e9f2da, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733473118020 2024-12-06T08:18:40,373 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd77b7f27cae417cb6e3bbe0766fd31f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473118726 2024-12-06T08:18:40,375 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c48eb88fb3974965979ffad62b3233f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733473119422 2024-12-06T08:18:40,375 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c611b7ef7c354a7f9b873ac597bbbfde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473118726 2024-12-06T08:18:40,376 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d0feade4188a4537bec767349718f8c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733473119422 2024-12-06T08:18:40,411 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#19 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:40,412 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/90b500a6755f47509a1f158cdffe2726 is 50, key is test_row_0/B:col10/1733473119433/Put/seqid=0 2024-12-06T08:18:40,413 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#18 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:40,414 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/d00d063c68b742f2ac036e903e3f0719 is 50, key is test_row_0/A:col10/1733473119433/Put/seqid=0 2024-12-06T08:18:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741857_1033 (size=12207) 2024-12-06T08:18:40,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:40,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T08:18:40,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:40,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:40,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:40,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:40,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:40,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:40,472 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/d00d063c68b742f2ac036e903e3f0719 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d00d063c68b742f2ac036e903e3f0719 2024-12-06T08:18:40,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741858_1034 (size=12207) 2024-12-06T08:18:40,489 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/90b500a6755f47509a1f158cdffe2726 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/90b500a6755f47509a1f158cdffe2726 2024-12-06T08:18:40,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/5ecf6fe96eac44228ca6b44bd9f07dbd is 50, key is test_row_0/A:col10/1733473119831/Put/seqid=0 2024-12-06T08:18:40,494 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into d00d063c68b742f2ac036e903e3f0719(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:40,494 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:40,494 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473120367; duration=0sec 2024-12-06T08:18:40,494 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:40,494 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:40,495 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:40,497 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:40,497 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:40,497 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:40,497 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/e19ee0e0c78b4f57beb73ddb9f6e69a0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2cb748387ad64a6985f305cf387bff1a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/68892efa49294e9d88e5fc634d4b330f] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.3 K 2024-12-06T08:18:40,499 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e19ee0e0c78b4f57beb73ddb9f6e69a0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733473118020 2024-12-06T08:18:40,500 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cb748387ad64a6985f305cf387bff1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473118726 2024-12-06T08:18:40,502 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68892efa49294e9d88e5fc634d4b330f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733473119422 2024-12-06T08:18:40,510 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into 90b500a6755f47509a1f158cdffe2726(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:40,510 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:40,510 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473120367; duration=0sec 2024-12-06T08:18:40,510 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:40,511 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:40,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473180519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741859_1035 (size=14341) 2024-12-06T08:18:40,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473180519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/5ecf6fe96eac44228ca6b44bd9f07dbd 2024-12-06T08:18:40,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473180522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,533 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:40,534 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/07209012e10540bca294f81337200d73 is 50, key is test_row_0/C:col10/1733473119433/Put/seqid=0 2024-12-06T08:18:40,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473180524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e0fe94aa1e84480b90d429e7ec3920a2 is 50, key is test_row_0/B:col10/1733473119831/Put/seqid=0 2024-12-06T08:18:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741860_1036 (size=12207) 2024-12-06T08:18:40,586 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/07209012e10540bca294f81337200d73 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/07209012e10540bca294f81337200d73 2024-12-06T08:18:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741861_1037 (size=12001) 
2024-12-06T08:18:40,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e0fe94aa1e84480b90d429e7ec3920a2 2024-12-06T08:18:40,607 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 07209012e10540bca294f81337200d73(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:40,609 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:40,609 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473120370; duration=0sec 2024-12-06T08:18:40,609 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:40,609 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:40,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7f6224964bb34571935e006c4899ebe0 is 50, key is test_row_0/C:col10/1733473119831/Put/seqid=0 2024-12-06T08:18:40,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473180627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473180629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473180636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473180640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741862_1038 (size=12001) 2024-12-06T08:18:40,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7f6224964bb34571935e006c4899ebe0 2024-12-06T08:18:40,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/5ecf6fe96eac44228ca6b44bd9f07dbd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/5ecf6fe96eac44228ca6b44bd9f07dbd 2024-12-06T08:18:40,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/5ecf6fe96eac44228ca6b44bd9f07dbd, entries=200, sequenceid=114, filesize=14.0 K 2024-12-06T08:18:40,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e0fe94aa1e84480b90d429e7ec3920a2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0fe94aa1e84480b90d429e7ec3920a2 2024-12-06T08:18:40,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0fe94aa1e84480b90d429e7ec3920a2, entries=150, sequenceid=114, filesize=11.7 K 2024-12-06T08:18:40,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7f6224964bb34571935e006c4899ebe0 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7f6224964bb34571935e006c4899ebe0 2024-12-06T08:18:40,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7f6224964bb34571935e006c4899ebe0, entries=150, sequenceid=114, filesize=11.7 K 2024-12-06T08:18:40,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 27b39638a4e82980fa51c6694c44d0ad in 261ms, sequenceid=114, compaction requested=false 2024-12-06T08:18:40,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:40,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:40,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T08:18:40,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:40,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:40,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:40,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:40,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:40,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:40,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/9c594a46eaca4c89a29a9d383f2a2ac1 is 50, key is test_row_0/A:col10/1733473120837/Put/seqid=0 2024-12-06T08:18:40,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473180887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473180895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741863_1039 (size=16881) 2024-12-06T08:18:40,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473180901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/9c594a46eaca4c89a29a9d383f2a2ac1 2024-12-06T08:18:40,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:40,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473180901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:40,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/9d66f6f7d58242b0ae9112d6159cf88f is 50, key is test_row_0/B:col10/1733473120837/Put/seqid=0 2024-12-06T08:18:40,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741864_1040 (size=12101) 2024-12-06T08:18:41,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473180997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473181005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473181011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473181012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:18:41,162 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-06T08:18:41,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:41,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-06T08:18:41,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:18:41,173 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:41,175 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:41,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:41,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T08:18:41,197 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T08:18:41,199 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T08:18:41,199 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-06T08:18:41,201 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:18:41,201 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T08:18:41,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T08:18:41,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-06T08:18:41,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-06T08:18:41,203 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-06T08:18:41,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473181206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473181209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473181216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473181219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:18:41,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:41,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:41,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:41,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/9d66f6f7d58242b0ae9112d6159cf88f 2024-12-06T08:18:41,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/91815d75eb4f4b30a59ee42eb1ad98cf is 50, key is test_row_0/C:col10/1733473120837/Put/seqid=0 2024-12-06T08:18:41,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741865_1041 (size=12101) 2024-12-06T08:18:41,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/91815d75eb4f4b30a59ee42eb1ad98cf 2024-12-06T08:18:41,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:18:41,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:41,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:41,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:41,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/9c594a46eaca4c89a29a9d383f2a2ac1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9c594a46eaca4c89a29a9d383f2a2ac1 2024-12-06T08:18:41,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9c594a46eaca4c89a29a9d383f2a2ac1, entries=250, sequenceid=134, filesize=16.5 K 2024-12-06T08:18:41,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/9d66f6f7d58242b0ae9112d6159cf88f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/9d66f6f7d58242b0ae9112d6159cf88f 2024-12-06T08:18:41,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473181510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/9d66f6f7d58242b0ae9112d6159cf88f, entries=150, sequenceid=134, filesize=11.8 K 2024-12-06T08:18:41,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/91815d75eb4f4b30a59ee42eb1ad98cf as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/91815d75eb4f4b30a59ee42eb1ad98cf 2024-12-06T08:18:41,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473181516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473181521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/91815d75eb4f4b30a59ee42eb1ad98cf, entries=150, sequenceid=134, filesize=11.8 K 2024-12-06T08:18:41,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 27b39638a4e82980fa51c6694c44d0ad in 689ms, sequenceid=134, compaction requested=true 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:41,529 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:41,529 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:41,531 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 43429 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:41,531 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:41,531 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,531 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:41,531 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:41,531 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d00d063c68b742f2ac036e903e3f0719, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/5ecf6fe96eac44228ca6b44bd9f07dbd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9c594a46eaca4c89a29a9d383f2a2ac1] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=42.4 K 2024-12-06T08:18:41,532 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:41,532 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/90b500a6755f47509a1f158cdffe2726, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0fe94aa1e84480b90d429e7ec3920a2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/9d66f6f7d58242b0ae9112d6159cf88f] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.5 K 2024-12-06T08:18:41,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:41,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T08:18:41,532 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d00d063c68b742f2ac036e903e3f0719, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733473119422 2024-12-06T08:18:41,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:41,532 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 90b500a6755f47509a1f158cdffe2726, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733473119422 2024-12-06T08:18:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:41,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:41,534 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ecf6fe96eac44228ca6b44bd9f07dbd, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733473119831 2024-12-06T08:18:41,535 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e0fe94aa1e84480b90d429e7ec3920a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733473119831 2024-12-06T08:18:41,535 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c594a46eaca4c89a29a9d383f2a2ac1, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473120515 2024-12-06T08:18:41,536 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 
9d66f6f7d58242b0ae9112d6159cf88f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473120515 2024-12-06T08:18:41,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/cc62b5ec5e59450cb67679fc9fa8fb1a is 50, key is test_row_0/A:col10/1733473120888/Put/seqid=0 2024-12-06T08:18:41,559 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#28 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:41,560 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fb911496ad844ae186b4667cea554b2b is 50, key is test_row_0/B:col10/1733473120837/Put/seqid=0 2024-12-06T08:18:41,568 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#29 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:41,569 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/62609bbe693b4a81ac379173da05d75a is 50, key is test_row_0/A:col10/1733473120837/Put/seqid=0 2024-12-06T08:18:41,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741866_1042 (size=14541) 2024-12-06T08:18:41,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741868_1044 (size=12409) 2024-12-06T08:18:41,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741867_1043 (size=12409) 2024-12-06T08:18:41,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473181586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473181588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,602 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fb911496ad844ae186b4667cea554b2b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fb911496ad844ae186b4667cea554b2b 2024-12-06T08:18:41,612 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into fb911496ad844ae186b4667cea554b2b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:41,612 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:41,612 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473121529; duration=0sec 2024-12-06T08:18:41,612 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:41,612 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:41,612 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:41,614 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:41,614 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:41,614 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,614 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/07209012e10540bca294f81337200d73, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7f6224964bb34571935e006c4899ebe0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/91815d75eb4f4b30a59ee42eb1ad98cf] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=35.5 K 2024-12-06T08:18:41,616 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 07209012e10540bca294f81337200d73, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733473119422 2024-12-06T08:18:41,617 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f6224964bb34571935e006c4899ebe0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733473119831 2024-12-06T08:18:41,618 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 91815d75eb4f4b30a59ee42eb1ad98cf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473120515 2024-12-06T08:18:41,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:41,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:41,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:41,642 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:41,643 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/67eb73d2620d4493ad8ffd8aafb26a8c is 50, key is test_row_0/C:col10/1733473120837/Put/seqid=0 2024-12-06T08:18:41,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:41,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741869_1045 (size=12409) 2024-12-06T08:18:41,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473181695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473181694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:18:41,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:41,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:41,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473181899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:41,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473181900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,952 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:41,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:41,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:41,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:41,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:41,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:41,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/cc62b5ec5e59450cb67679fc9fa8fb1a 2024-12-06T08:18:42,000 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/62609bbe693b4a81ac379173da05d75a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/62609bbe693b4a81ac379173da05d75a 2024-12-06T08:18:42,016 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 62609bbe693b4a81ac379173da05d75a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:42,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:42,016 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473121529; duration=0sec 2024-12-06T08:18:42,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:42,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:42,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e196f676be4543288077eaaa86c824ca is 50, key is test_row_0/B:col10/1733473120888/Put/seqid=0 2024-12-06T08:18:42,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473182019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741870_1046 (size=12151) 2024-12-06T08:18:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473182029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473182029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,097 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/67eb73d2620d4493ad8ffd8aafb26a8c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/67eb73d2620d4493ad8ffd8aafb26a8c 2024-12-06T08:18:42,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:42,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:42,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:42,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:42,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:42,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:42,110 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 67eb73d2620d4493ad8ffd8aafb26a8c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:42,111 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:42,111 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473121529; duration=0sec 2024-12-06T08:18:42,111 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:42,111 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:42,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:42,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473182203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473182205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:42,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:42,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:42,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:42,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:42,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:42,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:42,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:18:42,418 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:42,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:42,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:42,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e196f676be4543288077eaaa86c824ca 2024-12-06T08:18:42,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7bb18aeb609f457abe5cfb303fd33622 is 50, key is test_row_0/C:col10/1733473120888/Put/seqid=0 2024-12-06T08:18:42,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741871_1047 (size=12151) 2024-12-06T08:18:42,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7bb18aeb609f457abe5cfb303fd33622 2024-12-06T08:18:42,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/cc62b5ec5e59450cb67679fc9fa8fb1a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/cc62b5ec5e59450cb67679fc9fa8fb1a 2024-12-06T08:18:42,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/cc62b5ec5e59450cb67679fc9fa8fb1a, entries=200, sequenceid=155, filesize=14.2 K 2024-12-06T08:18:42,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e196f676be4543288077eaaa86c824ca as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e196f676be4543288077eaaa86c824ca 2024-12-06T08:18:42,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e196f676be4543288077eaaa86c824ca, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T08:18:42,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/7bb18aeb609f457abe5cfb303fd33622 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7bb18aeb609f457abe5cfb303fd33622 2024-12-06T08:18:42,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7bb18aeb609f457abe5cfb303fd33622, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T08:18:42,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 27b39638a4e82980fa51c6694c44d0ad in 1018ms, sequenceid=155, compaction requested=false 2024-12-06T08:18:42,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:42,572 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:18:42,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:42,574 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:18:42,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:42,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:42,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:42,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7043c2d916f5452582aee080fb0efdce is 50, key is test_row_0/A:col10/1733473121572/Put/seqid=0 2024-12-06T08:18:42,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741872_1048 (size=12151) 2024-12-06T08:18:42,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:42,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473182781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473182781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473182886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:42,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473182887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,026 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7043c2d916f5452582aee080fb0efdce 2024-12-06T08:18:43,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473183042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473183043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/7d19aa1198434ad5a2236dbbb511be18 is 50, key is test_row_0/B:col10/1733473121572/Put/seqid=0 2024-12-06T08:18:43,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473183050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741873_1049 (size=12151) 2024-12-06T08:18:43,086 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/7d19aa1198434ad5a2236dbbb511be18 2024-12-06T08:18:43,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473183092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473183093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6d8096de312147509593e48d453de761 is 50, key is test_row_0/C:col10/1733473121572/Put/seqid=0 2024-12-06T08:18:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741874_1050 (size=12151) 2024-12-06T08:18:43,155 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6d8096de312147509593e48d453de761 2024-12-06T08:18:43,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7043c2d916f5452582aee080fb0efdce as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7043c2d916f5452582aee080fb0efdce 2024-12-06T08:18:43,184 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7043c2d916f5452582aee080fb0efdce, entries=150, sequenceid=173, filesize=11.9 K 2024-12-06T08:18:43,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/7d19aa1198434ad5a2236dbbb511be18 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/7d19aa1198434ad5a2236dbbb511be18 2024-12-06T08:18:43,200 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 
{event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/7d19aa1198434ad5a2236dbbb511be18, entries=150, sequenceid=173, filesize=11.9 K 2024-12-06T08:18:43,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6d8096de312147509593e48d453de761 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6d8096de312147509593e48d453de761 2024-12-06T08:18:43,224 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6d8096de312147509593e48d453de761, entries=150, sequenceid=173, filesize=11.9 K 2024-12-06T08:18:43,227 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 27b39638a4e82980fa51c6694c44d0ad in 652ms, sequenceid=173, compaction requested=true 2024-12-06T08:18:43,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:43,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:43,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-06T08:18:43,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-06T08:18:43,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-06T08:18:43,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0540 sec 2024-12-06T08:18:43,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 2.0680 sec 2024-12-06T08:18:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:18:43,282 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-06T08:18:43,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-06T08:18:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:43,288 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:43,289 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:43,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:43,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:18:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, 
store=B 2024-12-06T08:18:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:43,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473183433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473183435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/d1d1bac9bb6c45acaf6a4cc6d692e962 is 50, key is test_row_0/A:col10/1733473122777/Put/seqid=0 2024-12-06T08:18:43,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:43,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:43,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741875_1051 (size=12151) 2024-12-06T08:18:43,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473183537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473183542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:43,597 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:43,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473183745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:43,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473183747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:43,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:43,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
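The RegionTooBusyException entries above show client mutations being rejected while the memstore of region 27b39638a4e82980fa51c6694c44d0ad is over its 512.0 K blocking limit and a flush is still in flight. The following is a minimal, illustrative client-side sketch (not part of the test) of backing off and retrying such a write: the table name, row key, column family 'A' and qualifier 'col10' are taken from the log, while the connection setup, retry count and backoff values are assumptions, and it further assumes the client is configured (e.g. a low hbase.client.retries.number) so that the exception actually surfaces to the caller instead of being retried or wrapped internally.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row/family/qualifier mirror the log ("test_row_0", family A, col10); the value is illustrative.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;               // illustrative starting backoff, doubled on each rejection
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                 // may be rejected while the region is over its memstore limit
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);        // give the in-progress flush time to drain the memstore
              backoffMs *= 2;
            }
          }
        }
      }
    }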
2024-12-06T08:18:43,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/d1d1bac9bb6c45acaf6a4cc6d692e962 2024-12-06T08:18:43,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:43,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/5230bbf3c27f4827947728e42dd483d6 is 50, key is test_row_0/B:col10/1733473122777/Put/seqid=0 2024-12-06T08:18:43,913 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:43,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:43,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:43,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:43,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:43,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:43,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741876_1052 (size=12151) 2024-12-06T08:18:43,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/5230bbf3c27f4827947728e42dd483d6 2024-12-06T08:18:43,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/663f6e286dc04e3694b80f09277d848f is 50, key is test_row_0/C:col10/1733473122777/Put/seqid=0 2024-12-06T08:18:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741877_1053 (size=12151) 2024-12-06T08:18:44,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/663f6e286dc04e3694b80f09277d848f 2024-12-06T08:18:44,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/d1d1bac9bb6c45acaf6a4cc6d692e962 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d1d1bac9bb6c45acaf6a4cc6d692e962 2024-12-06T08:18:44,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d1d1bac9bb6c45acaf6a4cc6d692e962, entries=150, sequenceid=196, filesize=11.9 K 2024-12-06T08:18:44,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/5230bbf3c27f4827947728e42dd483d6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/5230bbf3c27f4827947728e42dd483d6 2024-12-06T08:18:44,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/5230bbf3c27f4827947728e42dd483d6, entries=150, sequenceid=196, filesize=11.9 K 2024-12-06T08:18:44,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/663f6e286dc04e3694b80f09277d848f as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/663f6e286dc04e3694b80f09277d848f 2024-12-06T08:18:44,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/663f6e286dc04e3694b80f09277d848f, entries=150, sequenceid=196, filesize=11.9 K 2024-12-06T08:18:44,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 27b39638a4e82980fa51c6694c44d0ad in 652ms, sequenceid=196, compaction requested=true 2024-12-06T08:18:44,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:44,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:44,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:44,057 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:44,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:44,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:44,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:44,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T08:18:44,060 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51252 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:44,060 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:44,060 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:44,060 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/62609bbe693b4a81ac379173da05d75a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/cc62b5ec5e59450cb67679fc9fa8fb1a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7043c2d916f5452582aee080fb0efdce, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d1d1bac9bb6c45acaf6a4cc6d692e962] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=50.1 K 2024-12-06T08:18:44,061 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62609bbe693b4a81ac379173da05d75a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473120515 2024-12-06T08:18:44,061 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc62b5ec5e59450cb67679fc9fa8fb1a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733473120879 2024-12-06T08:18:44,062 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7043c2d916f5452582aee080fb0efdce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733473121565 2024-12-06T08:18:44,063 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1d1bac9bb6c45acaf6a4cc6d692e962, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473122754 2024-12-06T08:18:44,063 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:44,067 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:44,067 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:44,067 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:44,067 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fb911496ad844ae186b4667cea554b2b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e196f676be4543288077eaaa86c824ca, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/7d19aa1198434ad5a2236dbbb511be18, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/5230bbf3c27f4827947728e42dd483d6] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=47.7 K 2024-12-06T08:18:44,068 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fb911496ad844ae186b4667cea554b2b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473120515 2024-12-06T08:18:44,069 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e196f676be4543288077eaaa86c824ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733473120879 2024-12-06T08:18:44,070 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d19aa1198434ad5a2236dbbb511be18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733473121565 2024-12-06T08:18:44,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:18:44,070 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5230bbf3c27f4827947728e42dd483d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473122754 2024-12-06T08:18:44,070 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:44,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:44,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:44,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:44,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:44,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:44,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
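The entries above show the region's A and B stores each selecting all four of their HFiles for a minor compaction while another flush of the same region is being started. As a rough illustration only (the test drives these operations internally), the sketch below shows how a flush and a per-family compaction of the same table could be requested through the public Admin API; the table and family names come from the log, while the connection setup and the decision to issue these calls at all are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndCompact {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask the region servers to flush the table's memstores to HFiles.
          admin.flush(table);
          // Request a compaction of just the 'A' column family, as selected in the log above.
          admin.compact(table, Bytes.toBytes("A"));
        }
      }
    }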
2024-12-06T08:18:44,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:44,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:44,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:44,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/f41e4968ad5b45619f115ef89eee8a93 is 50, key is test_row_0/A:col10/1733473124061/Put/seqid=0 2024-12-06T08:18:44,115 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#40 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:44,116 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/148a8f623a214f69a9aaae68a239d957 is 50, key is test_row_0/A:col10/1733473122777/Put/seqid=0 2024-12-06T08:18:44,118 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#41 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:44,118 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/1389540a335447a886ee96fc6a1f596b is 50, key is test_row_0/B:col10/1733473122777/Put/seqid=0 2024-12-06T08:18:44,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741878_1054 (size=12151) 2024-12-06T08:18:44,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473184145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473184145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/f41e4968ad5b45619f115ef89eee8a93 2024-12-06T08:18:44,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741880_1056 (size=12595) 2024-12-06T08:18:44,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741879_1055 (size=12595) 2024-12-06T08:18:44,172 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/1389540a335447a886ee96fc6a1f596b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/1389540a335447a886ee96fc6a1f596b 2024-12-06T08:18:44,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/4f20f9d4e940436ebe0ff73f3b4912dd is 50, key is test_row_0/B:col10/1733473124061/Put/seqid=0 2024-12-06T08:18:44,183 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into 1389540a335447a886ee96fc6a1f596b(size=12.3 K), total 
size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:44,183 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:44,183 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=12, startTime=1733473124057; duration=0sec 2024-12-06T08:18:44,184 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:44,184 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:44,184 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:44,186 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:44,186 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:44,186 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:44,186 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/67eb73d2620d4493ad8ffd8aafb26a8c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7bb18aeb609f457abe5cfb303fd33622, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6d8096de312147509593e48d453de761, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/663f6e286dc04e3694b80f09277d848f] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=47.7 K 2024-12-06T08:18:44,187 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 67eb73d2620d4493ad8ffd8aafb26a8c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473120515 2024-12-06T08:18:44,188 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bb18aeb609f457abe5cfb303fd33622, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733473120879 2024-12-06T08:18:44,188 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d8096de312147509593e48d453de761, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733473121565 2024-12-06T08:18:44,189 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 663f6e286dc04e3694b80f09277d848f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473122754 2024-12-06T08:18:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741881_1057 (size=12151) 2024-12-06T08:18:44,225 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,227 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:44,228 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/3545d55a4bab433fab5ce4a875efecbd is 50, key is test_row_0/C:col10/1733473122777/Put/seqid=0 2024-12-06T08:18:44,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:44,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:44,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:44,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/4f20f9d4e940436ebe0ff73f3b4912dd 2024-12-06T08:18:44,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/44d2c83a7d5a45c291e3a516804953c2 is 50, key is test_row_0/C:col10/1733473124061/Put/seqid=0 2024-12-06T08:18:44,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473184253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473184252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741882_1058 (size=12595) 2024-12-06T08:18:44,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741883_1059 (size=12151) 2024-12-06T08:18:44,381 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:44,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473184459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473184460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:44,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:44,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:44,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,589 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/148a8f623a214f69a9aaae68a239d957 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/148a8f623a214f69a9aaae68a239d957 2024-12-06T08:18:44,597 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 148a8f623a214f69a9aaae68a239d957(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:44,598 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:44,598 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=12, startTime=1733473124057; duration=0sec 2024-12-06T08:18:44,599 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:44,599 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:44,684 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/3545d55a4bab433fab5ce4a875efecbd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/3545d55a4bab433fab5ce4a875efecbd 2024-12-06T08:18:44,694 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 3545d55a4bab433fab5ce4a875efecbd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:44,694 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:44,694 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=12, startTime=1733473124058; duration=0sec 2024-12-06T08:18:44,694 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:44,694 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:44,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:44,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:44,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:44,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:44,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:44,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/44d2c83a7d5a45c291e3a516804953c2 2024-12-06T08:18:44,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/f41e4968ad5b45619f115ef89eee8a93 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/f41e4968ad5b45619f115ef89eee8a93 2024-12-06T08:18:44,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/f41e4968ad5b45619f115ef89eee8a93, entries=150, sequenceid=211, filesize=11.9 K 2024-12-06T08:18:44,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/4f20f9d4e940436ebe0ff73f3b4912dd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4f20f9d4e940436ebe0ff73f3b4912dd 2024-12-06T08:18:44,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4f20f9d4e940436ebe0ff73f3b4912dd, entries=150, sequenceid=211, filesize=11.9 K 2024-12-06T08:18:44,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/44d2c83a7d5a45c291e3a516804953c2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/44d2c83a7d5a45c291e3a516804953c2 2024-12-06T08:18:44,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473184765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473184769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/44d2c83a7d5a45c291e3a516804953c2, entries=150, sequenceid=211, filesize=11.9 K 2024-12-06T08:18:44,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 27b39638a4e82980fa51c6694c44d0ad in 712ms, sequenceid=211, compaction requested=false 2024-12-06T08:18:44,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:44,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:44,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:18:44,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:44,852 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T08:18:44,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:44,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:44,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:44,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:44,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:44,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:44,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/4b6a43d571dc48e9866f713139a0e7b9 is 50, key is test_row_0/A:col10/1733473124128/Put/seqid=0 2024-12-06T08:18:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741884_1060 (size=12151) 2024-12-06T08:18:44,930 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/4b6a43d571dc48e9866f713139a0e7b9 2024-12-06T08:18:44,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/bc13c50ca49c4e10816e8fff06fcd7e0 is 50, key is test_row_0/B:col10/1733473124128/Put/seqid=0 2024-12-06T08:18:44,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741885_1061 (size=12151) 2024-12-06T08:18:44,984 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/bc13c50ca49c4e10816e8fff06fcd7e0 2024-12-06T08:18:45,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/c66786d7546e4924b8772f3977d8fd40 is 50, key is test_row_0/C:col10/1733473124128/Put/seqid=0 2024-12-06T08:18:45,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741886_1062 (size=12151) 2024-12-06T08:18:45,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:45,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:45,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473185082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473185093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473185102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473185209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473185209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473185216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473185279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473185279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:45,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473185413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473185416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473185421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,427 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/c66786d7546e4924b8772f3977d8fd40 2024-12-06T08:18:45,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/4b6a43d571dc48e9866f713139a0e7b9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/4b6a43d571dc48e9866f713139a0e7b9 2024-12-06T08:18:45,449 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/4b6a43d571dc48e9866f713139a0e7b9, entries=150, sequenceid=236, filesize=11.9 K 2024-12-06T08:18:45,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/bc13c50ca49c4e10816e8fff06fcd7e0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/bc13c50ca49c4e10816e8fff06fcd7e0 2024-12-06T08:18:45,460 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/bc13c50ca49c4e10816e8fff06fcd7e0, entries=150, sequenceid=236, filesize=11.9 K 2024-12-06T08:18:45,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/c66786d7546e4924b8772f3977d8fd40 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/c66786d7546e4924b8772f3977d8fd40 2024-12-06T08:18:45,469 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/c66786d7546e4924b8772f3977d8fd40, entries=150, sequenceid=236, filesize=11.9 K 2024-12-06T08:18:45,471 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 27b39638a4e82980fa51c6694c44d0ad in 620ms, sequenceid=236, compaction requested=true 2024-12-06T08:18:45,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:45,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:45,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-06T08:18:45,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-06T08:18:45,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-06T08:18:45,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1840 sec 2024-12-06T08:18:45,479 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.1930 sec 2024-12-06T08:18:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:45,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:18:45,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:45,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:45,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:45,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:45,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:45,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:45,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/6dc08bdfade64af1a5cbbc4ac267c845 is 50, key is test_row_0/A:col10/1733473125722/Put/seqid=0 2024-12-06T08:18:45,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741887_1063 (size=12151) 2024-12-06T08:18:45,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/6dc08bdfade64af1a5cbbc4ac267c845 2024-12-06T08:18:45,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473185764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473185764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473185768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/a6c8806c5e1a42d7bd6d551ebbf73da2 is 50, key is test_row_0/B:col10/1733473125722/Put/seqid=0 2024-12-06T08:18:45,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741888_1064 (size=12151) 2024-12-06T08:18:45,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/a6c8806c5e1a42d7bd6d551ebbf73da2 2024-12-06T08:18:45,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/86419d7e4bfa4b2d91e3066720f8d0ea is 50, key is test_row_0/C:col10/1733473125722/Put/seqid=0 2024-12-06T08:18:45,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473185870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473185871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473185876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:45,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741889_1065 (size=12151) 2024-12-06T08:18:45,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/86419d7e4bfa4b2d91e3066720f8d0ea 2024-12-06T08:18:45,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/6dc08bdfade64af1a5cbbc4ac267c845 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6dc08bdfade64af1a5cbbc4ac267c845 2024-12-06T08:18:45,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6dc08bdfade64af1a5cbbc4ac267c845, entries=150, sequenceid=251, filesize=11.9 K 2024-12-06T08:18:45,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/a6c8806c5e1a42d7bd6d551ebbf73da2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a6c8806c5e1a42d7bd6d551ebbf73da2 2024-12-06T08:18:45,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a6c8806c5e1a42d7bd6d551ebbf73da2, entries=150, sequenceid=251, filesize=11.9 K 2024-12-06T08:18:45,915 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/86419d7e4bfa4b2d91e3066720f8d0ea as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/86419d7e4bfa4b2d91e3066720f8d0ea 2024-12-06T08:18:45,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/86419d7e4bfa4b2d91e3066720f8d0ea, entries=150, sequenceid=251, filesize=11.9 K 2024-12-06T08:18:45,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 27b39638a4e82980fa51c6694c44d0ad in 200ms, sequenceid=251, compaction requested=true 2024-12-06T08:18:45,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:45,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:45,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:45,925 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:45,925 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:45,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:45,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:45,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:45,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:45,927 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:45,927 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:45,928 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
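
The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server rejecting writes in HRegion.checkResources once the region's memstore grows past its blocking threshold, which is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the unusually small 512.0 K figure suggests this test runs with a deliberately tiny flush size so that flushes and compactions are exercised constantly. Below is a minimal sketch, not taken from the test source, showing how those two settings combine into the limit reported in the log; the 128 K flush size and multiplier of 4 are assumptions chosen only because they reproduce the 512 K value.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values, picked only to match the 512.0 K limit seen in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush the region's memstores at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4 x the flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // checkResources() throws RegionTooBusyException while the region's memstore is above this size.
    long blockingLimit = flushSize * multiplier;                      // 524288 bytes = 512.0 K here
    System.out.println("Writes are rejected above " + blockingLimit + " bytes until a flush completes");
  }
}

This matches the pattern in the log: once a flush (sequenceid=236, then 251) drains the memstore back under the limit, the same client connections reissue the rejected Mutate calls with new callIds and they eventually succeed.
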
2024-12-06T08:18:45,928 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/148a8f623a214f69a9aaae68a239d957, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/f41e4968ad5b45619f115ef89eee8a93, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/4b6a43d571dc48e9866f713139a0e7b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6dc08bdfade64af1a5cbbc4ac267c845] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=47.9 K 2024-12-06T08:18:45,928 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:45,928 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:45,928 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:45,928 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/1389540a335447a886ee96fc6a1f596b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4f20f9d4e940436ebe0ff73f3b4912dd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/bc13c50ca49c4e10816e8fff06fcd7e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a6c8806c5e1a42d7bd6d551ebbf73da2] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=47.9 K 2024-12-06T08:18:45,929 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 148a8f623a214f69a9aaae68a239d957, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473122754 2024-12-06T08:18:45,929 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1389540a335447a886ee96fc6a1f596b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473122754 2024-12-06T08:18:45,929 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting f41e4968ad5b45619f115ef89eee8a93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, 
earliestPutTs=1733473123433 2024-12-06T08:18:45,930 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f20f9d4e940436ebe0ff73f3b4912dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733473123433 2024-12-06T08:18:45,931 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b6a43d571dc48e9866f713139a0e7b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733473124128 2024-12-06T08:18:45,931 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting bc13c50ca49c4e10816e8fff06fcd7e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733473124128 2024-12-06T08:18:45,931 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dc08bdfade64af1a5cbbc4ac267c845, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733473125066 2024-12-06T08:18:45,932 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a6c8806c5e1a42d7bd6d551ebbf73da2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733473125066 2024-12-06T08:18:45,960 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:45,961 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/94f1e863f7244639937884b410a5a690 is 50, key is test_row_0/A:col10/1733473125722/Put/seqid=0 2024-12-06T08:18:45,978 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:45,979 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e5d2e54b0c274c83bf28c878ea42efb5 is 50, key is test_row_0/B:col10/1733473125722/Put/seqid=0 2024-12-06T08:18:46,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741890_1066 (size=12731) 2024-12-06T08:18:46,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741891_1067 (size=12731) 2024-12-06T08:18:46,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:46,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T08:18:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:46,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/478a88e539984ae09fad60a300454b13 is 50, key is test_row_0/A:col10/1733473125759/Put/seqid=0 2024-12-06T08:18:46,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473186109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473186110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473186112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741892_1068 (size=14741) 2024-12-06T08:18:46,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/478a88e539984ae09fad60a300454b13 2024-12-06T08:18:46,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/016c080e592b47cebde06be786a2cc08 is 50, key is test_row_0/B:col10/1733473125759/Put/seqid=0 2024-12-06T08:18:46,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741893_1069 (size=12301) 2024-12-06T08:18:46,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473186217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473186218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473186218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473186289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473186300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473186422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473186423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473186425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,435 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/94f1e863f7244639937884b410a5a690 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/94f1e863f7244639937884b410a5a690 2024-12-06T08:18:46,448 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 94f1e863f7244639937884b410a5a690(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:46,448 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:46,448 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=12, startTime=1733473125925; duration=0sec 2024-12-06T08:18:46,448 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:46,448 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:46,448 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:46,450 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:46,450 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:46,450 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:46,451 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/3545d55a4bab433fab5ce4a875efecbd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/44d2c83a7d5a45c291e3a516804953c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/c66786d7546e4924b8772f3977d8fd40, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/86419d7e4bfa4b2d91e3066720f8d0ea] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=47.9 K 2024-12-06T08:18:46,454 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3545d55a4bab433fab5ce4a875efecbd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473122754 2024-12-06T08:18:46,455 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44d2c83a7d5a45c291e3a516804953c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733473123433 2024-12-06T08:18:46,456 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c66786d7546e4924b8772f3977d8fd40, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733473124128 2024-12-06T08:18:46,457 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86419d7e4bfa4b2d91e3066720f8d0ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733473125066 2024-12-06T08:18:46,460 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e5d2e54b0c274c83bf28c878ea42efb5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e5d2e54b0c274c83bf28c878ea42efb5 2024-12-06T08:18:46,467 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into e5d2e54b0c274c83bf28c878ea42efb5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:46,467 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:46,467 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=12, startTime=1733473125925; duration=0sec 2024-12-06T08:18:46,467 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:46,467 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:46,480 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:46,481 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/377410a8be82443cad4c80c73ff02fec is 50, key is test_row_0/C:col10/1733473125722/Put/seqid=0 2024-12-06T08:18:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741894_1070 (size=12731) 2024-12-06T08:18:46,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/016c080e592b47cebde06be786a2cc08 2024-12-06T08:18:46,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/cd0457d713ba43ed87f75484ab24b2dc is 50, key is test_row_0/C:col10/1733473125759/Put/seqid=0 2024-12-06T08:18:46,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741895_1071 (size=12301) 2024-12-06T08:18:46,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473186729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473186730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473186730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:46,911 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/377410a8be82443cad4c80c73ff02fec as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/377410a8be82443cad4c80c73ff02fec 2024-12-06T08:18:46,926 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 377410a8be82443cad4c80c73ff02fec(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:46,926 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:46,926 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=12, startTime=1733473125926; duration=0sec 2024-12-06T08:18:46,926 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:46,926 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:47,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/cd0457d713ba43ed87f75484ab24b2dc 2024-12-06T08:18:47,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/478a88e539984ae09fad60a300454b13 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/478a88e539984ae09fad60a300454b13 2024-12-06T08:18:47,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/478a88e539984ae09fad60a300454b13, entries=200, sequenceid=273, filesize=14.4 K 2024-12-06T08:18:47,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/016c080e592b47cebde06be786a2cc08 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/016c080e592b47cebde06be786a2cc08 2024-12-06T08:18:47,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/016c080e592b47cebde06be786a2cc08, entries=150, sequenceid=273, filesize=12.0 K 2024-12-06T08:18:47,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/cd0457d713ba43ed87f75484ab24b2dc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cd0457d713ba43ed87f75484ab24b2dc 2024-12-06T08:18:47,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cd0457d713ba43ed87f75484ab24b2dc, entries=150, sequenceid=273, filesize=12.0 K 2024-12-06T08:18:47,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 27b39638a4e82980fa51c6694c44d0ad in 1027ms, sequenceid=273, compaction requested=false 2024-12-06T08:18:47,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:47,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:47,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:18:47,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:47,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:47,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:47,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:47,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:47,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:47,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/1f4662d905ab46e0964bb8fc47fb5c85 is 50, key is test_row_0/A:col10/1733473126111/Put/seqid=0 2024-12-06T08:18:47,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741896_1072 (size=14741) 2024-12-06T08:18:47,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/1f4662d905ab46e0964bb8fc47fb5c85 2024-12-06T08:18:47,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473187302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473187312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473187313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/abc52584142444f3834b42e6864ca9ee is 50, key is test_row_0/B:col10/1733473126111/Put/seqid=0 2024-12-06T08:18:47,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741897_1073 (size=12301) 2024-12-06T08:18:47,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/abc52584142444f3834b42e6864ca9ee 2024-12-06T08:18:47,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/72321a123bac433bac7f936c5d0045d6 is 50, key is test_row_0/C:col10/1733473126111/Put/seqid=0 2024-12-06T08:18:47,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741898_1074 (size=12301) 2024-12-06T08:18:47,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/72321a123bac433bac7f936c5d0045d6 2024-12-06T08:18:47,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/1f4662d905ab46e0964bb8fc47fb5c85 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/1f4662d905ab46e0964bb8fc47fb5c85 2024-12-06T08:18:47,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/1f4662d905ab46e0964bb8fc47fb5c85, entries=200, sequenceid=292, filesize=14.4 K 2024-12-06T08:18:47,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/abc52584142444f3834b42e6864ca9ee as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/abc52584142444f3834b42e6864ca9ee 2024-12-06T08:18:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:18:47,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473187411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/abc52584142444f3834b42e6864ca9ee, entries=150, sequenceid=292, filesize=12.0 K 2024-12-06T08:18:47,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473187416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/72321a123bac433bac7f936c5d0045d6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/72321a123bac433bac7f936c5d0045d6 2024-12-06T08:18:47,424 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-06T08:18:47,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:47,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-06T08:18:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:18:47,429 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:47,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473187418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,431 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:47,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:47,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/72321a123bac433bac7f936c5d0045d6, entries=150, sequenceid=292, filesize=12.0 K 2024-12-06T08:18:47,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 27b39638a4e82980fa51c6694c44d0ad in 191ms, sequenceid=292, compaction requested=true 2024-12-06T08:18:47,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:47,435 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:47,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:47,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:47,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:47,436 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:47,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:47,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:47,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:47,437 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42213 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:47,437 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:47,437 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:47,437 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/94f1e863f7244639937884b410a5a690, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/478a88e539984ae09fad60a300454b13, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/1f4662d905ab46e0964bb8fc47fb5c85] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=41.2 K 2024-12-06T08:18:47,438 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94f1e863f7244639937884b410a5a690, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733473125066 2024-12-06T08:18:47,438 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:47,438 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:47,439 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:47,439 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e5d2e54b0c274c83bf28c878ea42efb5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/016c080e592b47cebde06be786a2cc08, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/abc52584142444f3834b42e6864ca9ee] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.5 K 2024-12-06T08:18:47,440 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e5d2e54b0c274c83bf28c878ea42efb5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733473125066 2024-12-06T08:18:47,440 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 478a88e539984ae09fad60a300454b13, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733473125759 2024-12-06T08:18:47,440 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 016c080e592b47cebde06be786a2cc08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733473125759 2024-12-06T08:18:47,441 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f4662d905ab46e0964bb8fc47fb5c85, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733473126106 2024-12-06T08:18:47,441 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting abc52584142444f3834b42e6864ca9ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733473126111 2024-12-06T08:18:47,460 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:47,461 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/46808a14ac5c4c8bb4b7d68fc1fabf2c is 50, key is test_row_0/B:col10/1733473126111/Put/seqid=0 2024-12-06T08:18:47,466 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:47,466 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea is 50, key is test_row_0/A:col10/1733473126111/Put/seqid=0 2024-12-06T08:18:47,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741899_1075 (size=12983) 2024-12-06T08:18:47,499 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/46808a14ac5c4c8bb4b7d68fc1fabf2c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/46808a14ac5c4c8bb4b7d68fc1fabf2c 2024-12-06T08:18:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741900_1076 (size=12983) 2024-12-06T08:18:47,511 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into 46808a14ac5c4c8bb4b7d68fc1fabf2c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:47,512 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:47,512 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473127436; duration=0sec 2024-12-06T08:18:47,512 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:47,512 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:47,512 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:47,516 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea 2024-12-06T08:18:47,516 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:47,517 DEBUG 
[RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:47,517 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:47,518 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/377410a8be82443cad4c80c73ff02fec, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cd0457d713ba43ed87f75484ab24b2dc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/72321a123bac433bac7f936c5d0045d6] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.5 K 2024-12-06T08:18:47,519 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 377410a8be82443cad4c80c73ff02fec, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733473125066 2024-12-06T08:18:47,520 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting cd0457d713ba43ed87f75484ab24b2dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733473125759 2024-12-06T08:18:47,521 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 72321a123bac433bac7f936c5d0045d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733473126111 2024-12-06T08:18:47,524 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into e1c715fdc7bb4cc78e9cc9009b4ad4ea(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:47,524 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:47,524 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473127435; duration=0sec 2024-12-06T08:18:47,524 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:47,524 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:47,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:18:47,537 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:47,537 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/37c1d697f4784c8bb3bf366bbd95afc7 is 50, key is test_row_0/C:col10/1733473126111/Put/seqid=0 2024-12-06T08:18:47,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741901_1077 (size=12983) 2024-12-06T08:18:47,565 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/37c1d697f4784c8bb3bf366bbd95afc7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/37c1d697f4784c8bb3bf366bbd95afc7 2024-12-06T08:18:47,575 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 37c1d697f4784c8bb3bf366bbd95afc7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:47,575 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:47,575 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473127436; duration=0sec 2024-12-06T08:18:47,575 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:47,575 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:47,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-06T08:18:47,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:47,586 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T08:18:47,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:47,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:47,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:47,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:47,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:47,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:47,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/dbd33bc4a8034ed4840ee5825937789a is 50, key is test_row_0/A:col10/1733473127293/Put/seqid=0 2024-12-06T08:18:47,619 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741902_1078 (size=12301) 2024-12-06T08:18:47,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:47,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:47,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473187639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473187640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473187640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:18:47,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473187743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473187745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473187745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473187949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473187947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:47,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:47,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473187950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,020 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/dbd33bc4a8034ed4840ee5825937789a 2024-12-06T08:18:48,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:18:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/dee2c5be9d5d43d28e782d67dae1eea9 is 50, key is test_row_0/B:col10/1733473127293/Put/seqid=0 2024-12-06T08:18:48,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741903_1079 (size=12301) 2024-12-06T08:18:48,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473188251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473188251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473188258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473188312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,315 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:18:48,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473188314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,316 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:18:48,465 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=316 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/dee2c5be9d5d43d28e782d67dae1eea9 2024-12-06T08:18:48,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/641eaeed1dad411488f656cc12b02b31 is 50, key is test_row_0/C:col10/1733473127293/Put/seqid=0 2024-12-06T08:18:48,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741904_1080 (size=12301) 2024-12-06T08:18:48,484 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/641eaeed1dad411488f656cc12b02b31 2024-12-06T08:18:48,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/dbd33bc4a8034ed4840ee5825937789a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/dbd33bc4a8034ed4840ee5825937789a 2024-12-06T08:18:48,497 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/dbd33bc4a8034ed4840ee5825937789a, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T08:18:48,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/dee2c5be9d5d43d28e782d67dae1eea9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/dee2c5be9d5d43d28e782d67dae1eea9 2024-12-06T08:18:48,507 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/dee2c5be9d5d43d28e782d67dae1eea9, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T08:18:48,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/641eaeed1dad411488f656cc12b02b31 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/641eaeed1dad411488f656cc12b02b31 2024-12-06T08:18:48,516 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/641eaeed1dad411488f656cc12b02b31, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T08:18:48,518 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 27b39638a4e82980fa51c6694c44d0ad in 932ms, sequenceid=316, compaction requested=false 2024-12-06T08:18:48,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:48,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:48,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-06T08:18:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-06T08:18:48,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-06T08:18:48,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0890 sec 2024-12-06T08:18:48,525 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.0980 sec 2024-12-06T08:18:48,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:18:48,533 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-06T08:18:48,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:48,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-06T08:18:48,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T08:18:48,538 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:48,539 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:48,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:48,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T08:18:48,691 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-06T08:18:48,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:48,692 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:18:48,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:48,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:48,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:48,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:48,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:48,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:48,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/bc8f611e34c34ecdaceefc1390a08994 is 50, key is test_row_0/A:col10/1733473127638/Put/seqid=0 2024-12-06T08:18:48,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741905_1081 (size=12301) 2024-12-06T08:18:48,727 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=26.84 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/bc8f611e34c34ecdaceefc1390a08994 2024-12-06T08:18:48,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e0bc1044923a479e9d08dc15bc98936b is 50, key is test_row_0/B:col10/1733473127638/Put/seqid=0 2024-12-06T08:18:48,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741906_1082 (size=12301) 2024-12-06T08:18:48,756 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e0bc1044923a479e9d08dc15bc98936b 2024-12-06T08:18:48,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:48,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6e5b7cd954c14aeaa8c271ee4b54b4e0 is 50, key is test_row_0/C:col10/1733473127638/Put/seqid=0 2024-12-06T08:18:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741907_1083 (size=12301) 2024-12-06T08:18:48,803 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6e5b7cd954c14aeaa8c271ee4b54b4e0 2024-12-06T08:18:48,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473188799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473188799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473188804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/bc8f611e34c34ecdaceefc1390a08994 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bc8f611e34c34ecdaceefc1390a08994 2024-12-06T08:18:48,825 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bc8f611e34c34ecdaceefc1390a08994, entries=150, sequenceid=331, filesize=12.0 K 2024-12-06T08:18:48,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/e0bc1044923a479e9d08dc15bc98936b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0bc1044923a479e9d08dc15bc98936b 2024-12-06T08:18:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T08:18:48,843 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0bc1044923a479e9d08dc15bc98936b, entries=150, sequenceid=331, filesize=12.0 K 2024-12-06T08:18:48,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6e5b7cd954c14aeaa8c271ee4b54b4e0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6e5b7cd954c14aeaa8c271ee4b54b4e0 2024-12-06T08:18:48,852 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6e5b7cd954c14aeaa8c271ee4b54b4e0, entries=150, sequenceid=331, filesize=12.0 K 2024-12-06T08:18:48,853 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 27b39638a4e82980fa51c6694c44d0ad in 160ms, sequenceid=331, compaction requested=true 2024-12-06T08:18:48,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:48,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:48,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-06T08:18:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-06T08:18:48,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-06T08:18:48,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 316 msec 2024-12-06T08:18:48,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 323 msec 2024-12-06T08:18:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:48,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:18:48,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:48,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:48,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:48,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:48,920 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:48,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:48,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/30d6f2667bf84e38b0bddfb152e14641 is 50, key is test_row_0/A:col10/1733473128918/Put/seqid=0 2024-12-06T08:18:48,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473188938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473188939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:48,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473188939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:48,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741908_1084 (size=19621) 2024-12-06T08:18:49,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473189041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473189045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473189046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T08:18:49,141 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-06T08:18:49,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-06T08:18:49,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T08:18:49,146 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:49,147 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:49,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:49,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473189244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T08:18:49,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473189251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473189252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,300 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-06T08:18:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:49,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:49,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:49,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/30d6f2667bf84e38b0bddfb152e14641 2024-12-06T08:18:49,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/63564774edc844dc91ccf335c812774a is 50, key is test_row_0/B:col10/1733473128918/Put/seqid=0 2024-12-06T08:18:49,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741909_1085 (size=12301) 2024-12-06T08:18:49,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/63564774edc844dc91ccf335c812774a 2024-12-06T08:18:49,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6214f3c3ff474addb59fab0907424bcd is 50, key is test_row_0/C:col10/1733473128918/Put/seqid=0 2024-12-06T08:18:49,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741910_1086 (size=12301) 2024-12-06T08:18:49,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6214f3c3ff474addb59fab0907424bcd 2024-12-06T08:18:49,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/30d6f2667bf84e38b0bddfb152e14641 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/30d6f2667bf84e38b0bddfb152e14641 2024-12-06T08:18:49,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/30d6f2667bf84e38b0bddfb152e14641, entries=300, sequenceid=355, filesize=19.2 K 2024-12-06T08:18:49,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/63564774edc844dc91ccf335c812774a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/63564774edc844dc91ccf335c812774a 2024-12-06T08:18:49,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/63564774edc844dc91ccf335c812774a, entries=150, sequenceid=355, filesize=12.0 K 2024-12-06T08:18:49,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/6214f3c3ff474addb59fab0907424bcd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6214f3c3ff474addb59fab0907424bcd 2024-12-06T08:18:49,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6214f3c3ff474addb59fab0907424bcd, entries=150, sequenceid=355, filesize=12.0 K 2024-12-06T08:18:49,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 27b39638a4e82980fa51c6694c44d0ad in 528ms, sequenceid=355, compaction requested=true 2024-12-06T08:18:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:49,446 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:49,446 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:49,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:49,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:49,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:49,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:49,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T08:18:49,449 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57206 starting at candidate #0 after considering 3 permutations with 3 in ratio 
2024-12-06T08:18:49,449 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:49,449 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:49,449 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/dbd33bc4a8034ed4840ee5825937789a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bc8f611e34c34ecdaceefc1390a08994, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/30d6f2667bf84e38b0bddfb152e14641] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=55.9 K 2024-12-06T08:18:49,449 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:49,449 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:49,449 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:49,450 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/46808a14ac5c4c8bb4b7d68fc1fabf2c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/dee2c5be9d5d43d28e782d67dae1eea9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0bc1044923a479e9d08dc15bc98936b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/63564774edc844dc91ccf335c812774a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=48.7 K 2024-12-06T08:18:49,450 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 46808a14ac5c4c8bb4b7d68fc1fabf2c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733473126111 2024-12-06T08:18:49,451 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1c715fdc7bb4cc78e9cc9009b4ad4ea, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733473126111 2024-12-06T08:18:49,451 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dee2c5be9d5d43d28e782d67dae1eea9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473127293 2024-12-06T08:18:49,451 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e0bc1044923a479e9d08dc15bc98936b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733473127634 2024-12-06T08:18:49,451 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbd33bc4a8034ed4840ee5825937789a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473127293 2024-12-06T08:18:49,452 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 63564774edc844dc91ccf335c812774a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733473128796 2024-12-06T08:18:49,452 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc8f611e34c34ecdaceefc1390a08994, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733473127634 2024-12-06T08:18:49,452 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30d6f2667bf84e38b0bddfb152e14641, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733473128791 2024-12-06T08:18:49,454 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 
2024-12-06T08:18:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:49,455 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T08:18:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:49,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/05d8403d9f3f4316b2c0de647ec92977 is 50, key is test_row_0/A:col10/1733473128937/Put/seqid=0 2024-12-06T08:18:49,479 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#74 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:49,480 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#73 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:49,480 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/759a315e44974a85a656050771b6057e is 50, key is test_row_0/A:col10/1733473128918/Put/seqid=0 2024-12-06T08:18:49,481 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f071cf2fef1340ee90d13a3524dbeb29 is 50, key is test_row_0/B:col10/1733473128918/Put/seqid=0 2024-12-06T08:18:49,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741911_1087 (size=12301) 2024-12-06T08:18:49,493 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/05d8403d9f3f4316b2c0de647ec92977 2024-12-06T08:18:49,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/a84018974bda4530bc3951218f32ef58 is 50, key is test_row_0/B:col10/1733473128937/Put/seqid=0 2024-12-06T08:18:49,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741912_1088 (size=13119) 2024-12-06T08:18:49,516 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/759a315e44974a85a656050771b6057e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/759a315e44974a85a656050771b6057e 2024-12-06T08:18:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741913_1089 (size=13119) 2024-12-06T08:18:49,529 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 759a315e44974a85a656050771b6057e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
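These compactions were scheduled by the region server itself, but the same machinery can be exercised from a client through the Admin API. A minimal sketch, assuming an HBase 2.x client on the classpath and a reachable cluster configuration; the class name and the decision to compact the whole table are illustrative, not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionTrigger {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Ask every region of the table to run a compaction; the region
                // server still decides which files to pick, exactly as
                // ExploringCompactionPolicy does in the log above.
                admin.compact(table);
                // admin.majorCompact(table) would rewrite all store files instead.
            }
        }
    }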
2024-12-06T08:18:49,529 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:49,529 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=12, startTime=1733473129446; duration=0sec 2024-12-06T08:18:49,529 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:49,529 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:49,530 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:18:49,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741914_1090 (size=12301) 2024-12-06T08:18:49,533 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f071cf2fef1340ee90d13a3524dbeb29 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f071cf2fef1340ee90d13a3524dbeb29 2024-12-06T08:18:49,534 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/a84018974bda4530bc3951218f32ef58 2024-12-06T08:18:49,539 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:18:49,539 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:49,540 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:49,540 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/37c1d697f4784c8bb3bf366bbd95afc7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/641eaeed1dad411488f656cc12b02b31, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6e5b7cd954c14aeaa8c271ee4b54b4e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6214f3c3ff474addb59fab0907424bcd] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=48.7 K 2024-12-06T08:18:49,541 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37c1d697f4784c8bb3bf366bbd95afc7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733473126111 2024-12-06T08:18:49,542 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 641eaeed1dad411488f656cc12b02b31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473127293 2024-12-06T08:18:49,543 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e5b7cd954c14aeaa8c271ee4b54b4e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733473127634 2024-12-06T08:18:49,546 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6214f3c3ff474addb59fab0907424bcd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733473128796 2024-12-06T08:18:49,546 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into f071cf2fef1340ee90d13a3524dbeb29(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:49,546 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:49,546 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=12, startTime=1733473129446; duration=0sec 2024-12-06T08:18:49,546 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:49,546 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:49,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/f9687c25ce0d43c480e138977e0c2643 is 50, key is test_row_0/C:col10/1733473128937/Put/seqid=0 2024-12-06T08:18:49,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:49,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:49,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741915_1091 (size=12301) 2024-12-06T08:18:49,562 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/f9687c25ce0d43c480e138977e0c2643 2024-12-06T08:18:49,564 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#77 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:49,565 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/479e693598c246fc9dafcba99fdb64ce is 50, key is test_row_0/C:col10/1733473128918/Put/seqid=0 2024-12-06T08:18:49,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/05d8403d9f3f4316b2c0de647ec92977 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/05d8403d9f3f4316b2c0de647ec92977 2024-12-06T08:18:49,581 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/05d8403d9f3f4316b2c0de647ec92977, entries=150, sequenceid=367, filesize=12.0 K 2024-12-06T08:18:49,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/a84018974bda4530bc3951218f32ef58 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a84018974bda4530bc3951218f32ef58 2024-12-06T08:18:49,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741916_1092 (size=13119) 2024-12-06T08:18:49,591 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a84018974bda4530bc3951218f32ef58, entries=150, sequenceid=367, filesize=12.0 K 2024-12-06T08:18:49,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/f9687c25ce0d43c480e138977e0c2643 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/f9687c25ce0d43c480e138977e0c2643 2024-12-06T08:18:49,594 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/479e693598c246fc9dafcba99fdb64ce as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/479e693598c246fc9dafcba99fdb64ce 
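The PressureAwareThroughputController lines show why no throttling happened here: the measured compaction throughput (2.18 MB/second and 6.55 MB/second) stayed far below the 50.00 MB/second limit in force, so the controller slept 0 times. The limit itself is adaptive; it moves between configurable lower and higher bounds as flush and compaction pressure changes (the exact hbase-site.xml keys for those bounds vary by release and are not shown in this log).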
2024-12-06T08:18:49,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473189595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,603 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 479e693598c246fc9dafcba99fdb64ce(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:49,604 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473189600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,604 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=12, startTime=1733473129447; duration=0sec 2024-12-06T08:18:49,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473189600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,604 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:49,604 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:49,606 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/f9687c25ce0d43c480e138977e0c2643, entries=150, sequenceid=367, filesize=12.0 K 2024-12-06T08:18:49,607 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 27b39638a4e82980fa51c6694c44d0ad in 152ms, sequenceid=367, compaction requested=false 2024-12-06T08:18:49,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:49,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
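The RegionTooBusyException warnings above are the region server refusing puts while the region's memstore sits over its 512.0 K blocking limit; the condition clears once the in-flight flush and compactions drain the memstore. The stock client retries this exception on its own, but a caller can also back off explicitly. A minimal sketch, assuming a reachable cluster; the row, family, and qualifier mirror the test data seen in the log, while the retry bound, sleep, and class name are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // With internal retries left enabled, the busy condition usually
            // surfaces wrapped in a retries-exhausted exception; disabling them
            // lets this loop see RegionTooBusyException directly.
            conf.setInt("hbase.client.retries.number", 1);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);                  // rejected while the memstore is over the blocking limit
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;       // give up after a few attempts
                        Thread.sleep(100L * attempt);    // simple linear backoff
                    }
                }
            }
        }
    }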
2024-12-06T08:18:49,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-06T08:18:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-06T08:18:49,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-06T08:18:49,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 462 msec 2024-12-06T08:18:49,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 469 msec 2024-12-06T08:18:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:49,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:18:49,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:49,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:49,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:49,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:49,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:49,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:49,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/adee3a4c153146ceabbb731777b18343 is 50, key is test_row_0/A:col10/1733473129702/Put/seqid=0 2024-12-06T08:18:49,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473189715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473189721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473189721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741917_1093 (size=12301) 2024-12-06T08:18:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-06T08:18:49,749 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-06T08:18:49,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-06T08:18:49,753 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T08:18:49,756 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:49,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:49,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473189822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473189825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:49,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473189830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T08:18:49,908 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:49,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-06T08:18:49,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:49,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:49,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:49,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
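Procedure pid=24 exists because a client asked for a table flush (the master logs Client=jenkins//172.17.0.2 flush TestAcidGuarantees); the master then fans it out as one FlushRegionProcedure per region, pid=25 here. The client side of that request is a single Admin call. A minimal sketch, again assuming a reachable cluster configuration; the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Submits a FlushTableProcedure on the master, which dispatches a
                // FlushRegionProcedure to each region server hosting the table,
                // as seen above for pid=24/pid=25.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }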
2024-12-06T08:18:49,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:49,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:50,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473190025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473190027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473190033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T08:18:50,061 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-06T08:18:50,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:50,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:50,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:50,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
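Until the MemStoreFlusher finishes, the remote flush keeps failing with "Unable to complete flush ... as already flushing" and the master re-dispatches pid=25, while writers keep bouncing off the 512.0 K limit. That blocking limit is normally the per-region memstore flush size multiplied by the block multiplier, both of which this test apparently sets far below production defaults. The concrete values in the sketch below are a guess that happens to reproduce 512 K, not values read from the test configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
        public static void main(String[] args) {
            // These are server-side settings that belong in hbase-site.xml on the
            // region servers; a client-side Configuration is used here only to
            // spell out the key names and units (bytes, plus a bare multiplier).
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // hypothetical 128 KB flush size
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4 x flush size
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512 K
        }
    }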
2024-12-06T08:18:50,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:50,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:50,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/adee3a4c153146ceabbb731777b18343 2024-12-06T08:18:50,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/d401bd95689f4e689a834a1137134375 is 50, key is test_row_0/B:col10/1733473129702/Put/seqid=0 2024-12-06T08:18:50,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741918_1094 (size=12301) 2024-12-06T08:18:50,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/d401bd95689f4e689a834a1137134375 2024-12-06T08:18:50,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/d2b4663095234eafbb4a5bbe797a3e9a is 50, key is test_row_0/C:col10/1733473129702/Put/seqid=0 2024-12-06T08:18:50,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741919_1095 (size=12301) 2024-12-06T08:18:50,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/d2b4663095234eafbb4a5bbe797a3e9a 2024-12-06T08:18:50,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/adee3a4c153146ceabbb731777b18343 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/adee3a4c153146ceabbb731777b18343 2024-12-06T08:18:50,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/adee3a4c153146ceabbb731777b18343, entries=150, sequenceid=395, filesize=12.0 K 2024-12-06T08:18:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/d401bd95689f4e689a834a1137134375 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d401bd95689f4e689a834a1137134375 2024-12-06T08:18:50,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d401bd95689f4e689a834a1137134375, entries=150, sequenceid=395, filesize=12.0 K 2024-12-06T08:18:50,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/d2b4663095234eafbb4a5bbe797a3e9a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/d2b4663095234eafbb4a5bbe797a3e9a 2024-12-06T08:18:50,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/d2b4663095234eafbb4a5bbe797a3e9a, entries=150, sequenceid=395, filesize=12.0 K 2024-12-06T08:18:50,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 27b39638a4e82980fa51c6694c44d0ad in 510ms, sequenceid=395, compaction requested=true 2024-12-06T08:18:50,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:50,215 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:50,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-06T08:18:50,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:50,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:50,216 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:50,216 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:18:50,216 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:50,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:50,217 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:50,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:50,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:50,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:50,217 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:50,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:50,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:50,217 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/759a315e44974a85a656050771b6057e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/05d8403d9f3f4316b2c0de647ec92977, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/adee3a4c153146ceabbb731777b18343] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.8 K 2024-12-06T08:18:50,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:50,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:50,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:50,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:50,218 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 759a315e44974a85a656050771b6057e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733473128796 2024-12-06T08:18:50,219 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05d8403d9f3f4316b2c0de647ec92977, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733473128936 2024-12-06T08:18:50,219 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:50,220 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:50,220 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:50,220 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f071cf2fef1340ee90d13a3524dbeb29, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a84018974bda4530bc3951218f32ef58, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d401bd95689f4e689a834a1137134375] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.8 K 2024-12-06T08:18:50,220 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting adee3a4c153146ceabbb731777b18343, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733473129586 2024-12-06T08:18:50,221 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f071cf2fef1340ee90d13a3524dbeb29, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733473128796 2024-12-06T08:18:50,222 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a84018974bda4530bc3951218f32ef58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733473128936 2024-12-06T08:18:50,224 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d401bd95689f4e689a834a1137134375, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733473129586 2024-12-06T08:18:50,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7f313d72156a4410bdcc69a02cb2393f is 50, key is test_row_0/A:col10/1733473129708/Put/seqid=0 2024-12-06T08:18:50,240 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:50,241 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/347163e905054d3986f713b32b54f16a is 50, key is test_row_0/B:col10/1733473129702/Put/seqid=0 2024-12-06T08:18:50,245 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#83 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:50,245 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/9262a91827e440c88819ad6f530e4148 is 50, key is test_row_0/A:col10/1733473129702/Put/seqid=0 2024-12-06T08:18:50,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741921_1097 (size=12301) 2024-12-06T08:18:50,273 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7f313d72156a4410bdcc69a02cb2393f 2024-12-06T08:18:50,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741920_1096 (size=13221) 2024-12-06T08:18:50,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741922_1098 (size=13221) 2024-12-06T08:18:50,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/78d34f6ac2124bfeab7fb5a995b51189 is 50, key is test_row_0/B:col10/1733473129708/Put/seqid=0 2024-12-06T08:18:50,292 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/347163e905054d3986f713b32b54f16a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/347163e905054d3986f713b32b54f16a 2024-12-06T08:18:50,300 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into 347163e905054d3986f713b32b54f16a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:50,300 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:50,300 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473130216; duration=0sec 2024-12-06T08:18:50,300 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:50,300 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:50,301 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:50,302 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:50,302 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:50,302 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:50,302 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/479e693598c246fc9dafcba99fdb64ce, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/f9687c25ce0d43c480e138977e0c2643, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/d2b4663095234eafbb4a5bbe797a3e9a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.8 K 2024-12-06T08:18:50,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 479e693598c246fc9dafcba99fdb64ce, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733473128796 2024-12-06T08:18:50,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f9687c25ce0d43c480e138977e0c2643, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733473128936 2024-12-06T08:18:50,304 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d2b4663095234eafbb4a5bbe797a3e9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733473129586 2024-12-06T08:18:50,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 
is added to blk_1073741923_1099 (size=12301) 2024-12-06T08:18:50,322 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#85 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:50,323 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/cf0b2f24026e497999dce1a7d0585ab3 is 50, key is test_row_0/C:col10/1733473129702/Put/seqid=0 2024-12-06T08:18:50,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:50,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:50,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T08:18:50,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741924_1100 (size=13221) 2024-12-06T08:18:50,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473190372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473190374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473190374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473190478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473190480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473190481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473190682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,691 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/9262a91827e440c88819ad6f530e4148 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9262a91827e440c88819ad6f530e4148 2024-12-06T08:18:50,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473190687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:50,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473190689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:50,704 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 9262a91827e440c88819ad6f530e4148(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:50,704 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:50,704 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473130215; duration=0sec 2024-12-06T08:18:50,704 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:50,704 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:50,708 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/78d34f6ac2124bfeab7fb5a995b51189 2024-12-06T08:18:50,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/70fb068973a341429e256e31b19412f7 is 50, key is test_row_0/C:col10/1733473129708/Put/seqid=0 2024-12-06T08:18:50,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741925_1101 (size=12301) 2024-12-06T08:18:50,743 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=406 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/70fb068973a341429e256e31b19412f7 2024-12-06T08:18:50,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7f313d72156a4410bdcc69a02cb2393f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7f313d72156a4410bdcc69a02cb2393f 2024-12-06T08:18:50,760 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7f313d72156a4410bdcc69a02cb2393f, entries=150, sequenceid=406, filesize=12.0 K 2024-12-06T08:18:50,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/78d34f6ac2124bfeab7fb5a995b51189 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/78d34f6ac2124bfeab7fb5a995b51189 2024-12-06T08:18:50,773 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/cf0b2f24026e497999dce1a7d0585ab3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cf0b2f24026e497999dce1a7d0585ab3 2024-12-06T08:18:50,784 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/78d34f6ac2124bfeab7fb5a995b51189, entries=150, sequenceid=406, filesize=12.0 K 2024-12-06T08:18:50,785 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into cf0b2f24026e497999dce1a7d0585ab3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:50,785 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:50,785 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473130217; duration=0sec 2024-12-06T08:18:50,785 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:50,785 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:50,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/70fb068973a341429e256e31b19412f7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/70fb068973a341429e256e31b19412f7 2024-12-06T08:18:50,799 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/70fb068973a341429e256e31b19412f7, entries=150, sequenceid=406, filesize=12.0 K 2024-12-06T08:18:50,800 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 27b39638a4e82980fa51c6694c44d0ad in 584ms, sequenceid=406, compaction requested=false 2024-12-06T08:18:50,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:50,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:50,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-06T08:18:50,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-06T08:18:50,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-06T08:18:50,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0460 sec 2024-12-06T08:18:50,806 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0540 sec 2024-12-06T08:18:50,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-06T08:18:50,858 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-06T08:18:50,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:50,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-06T08:18:50,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T08:18:50,864 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:50,865 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:50,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:50,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T08:18:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:50,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-06T08:18:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, 
store=B 2024-12-06T08:18:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:51,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/06f888581cfa4d5fabb479d12cf43fdc is 50, key is test_row_0/A:col10/1733473130360/Put/seqid=0 2024-12-06T08:18:51,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741926_1102 (size=12301) 2024-12-06T08:18:51,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:51,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:51,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473191035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473191036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473191038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473191139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473191140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473191140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T08:18:51,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:51,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:51,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:51,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473191341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473191344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473191348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/06f888581cfa4d5fabb479d12cf43fdc 2024-12-06T08:18:51,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/10c1682554a94c2faeec0248d20b7ca3 is 50, key is test_row_0/B:col10/1733473130360/Put/seqid=0 2024-12-06T08:18:51,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741927_1103 (size=12301) 2024-12-06T08:18:51,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/10c1682554a94c2faeec0248d20b7ca3 2024-12-06T08:18:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T08:18:51,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/1da5131b2865451dbb0066114a5937aa is 50, key is test_row_0/C:col10/1733473130360/Put/seqid=0 2024-12-06T08:18:51,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:51,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:51,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741928_1104 (size=12301) 2024-12-06T08:18:51,641 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:51,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:51,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:51,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473191645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473191646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473191650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,798 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:51,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:51,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/1da5131b2865451dbb0066114a5937aa 2024-12-06T08:18:51,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/06f888581cfa4d5fabb479d12cf43fdc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/06f888581cfa4d5fabb479d12cf43fdc 2024-12-06T08:18:51,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/06f888581cfa4d5fabb479d12cf43fdc, entries=150, sequenceid=436, filesize=12.0 K 2024-12-06T08:18:51,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/10c1682554a94c2faeec0248d20b7ca3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/10c1682554a94c2faeec0248d20b7ca3 2024-12-06T08:18:51,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/10c1682554a94c2faeec0248d20b7ca3, entries=150, sequenceid=436, filesize=12.0 K 2024-12-06T08:18:51,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/1da5131b2865451dbb0066114a5937aa as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/1da5131b2865451dbb0066114a5937aa 2024-12-06T08:18:51,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/1da5131b2865451dbb0066114a5937aa, entries=150, sequenceid=436, filesize=12.0 K 2024-12-06T08:18:51,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 27b39638a4e82980fa51c6694c44d0ad in 947ms, sequenceid=436, compaction requested=true 2024-12-06T08:18:51,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:51,935 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:51,935 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:51,936 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:51,936 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:51,936 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:51,937 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9262a91827e440c88819ad6f530e4148, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7f313d72156a4410bdcc69a02cb2393f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/06f888581cfa4d5fabb479d12cf43fdc] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.9 K 2024-12-06T08:18:51,937 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:51,937 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:51,937 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,938 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9262a91827e440c88819ad6f530e4148, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733473129586 2024-12-06T08:18:51,938 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/347163e905054d3986f713b32b54f16a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/78d34f6ac2124bfeab7fb5a995b51189, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/10c1682554a94c2faeec0248d20b7ca3] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.9 K 2024-12-06T08:18:51,938 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f313d72156a4410bdcc69a02cb2393f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1733473129708 2024-12-06T08:18:51,938 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 347163e905054d3986f713b32b54f16a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733473129586 2024-12-06T08:18:51,939 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 78d34f6ac2124bfeab7fb5a995b51189, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1733473129708 2024-12-06T08:18:51,939 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 06f888581cfa4d5fabb479d12cf43fdc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733473130360 2024-12-06T08:18:51,939 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 10c1682554a94c2faeec0248d20b7ca3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733473130360 2024-12-06T08:18:51,952 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:51,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-06T08:18:51,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:51,953 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-06T08:18:51,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:51,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:51,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:51,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:51,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:51,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:51,966 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#90 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:51,967 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fe40919e14984957b94de9b6bd42f666 is 50, key is test_row_0/B:col10/1733473130360/Put/seqid=0 2024-12-06T08:18:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T08:18:51,981 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:51,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/6c4de9d678ef4846808ba06b42faae50 is 50, key is test_row_0/A:col10/1733473131035/Put/seqid=0 2024-12-06T08:18:51,982 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/aa2d96e29c49402e9bd6c3d6f8d86096 is 50, key is test_row_0/A:col10/1733473130360/Put/seqid=0 2024-12-06T08:18:51,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741929_1105 (size=13323) 2024-12-06T08:18:51,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741930_1106 (size=13323) 2024-12-06T08:18:52,004 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fe40919e14984957b94de9b6bd42f666 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fe40919e14984957b94de9b6bd42f666 2024-12-06T08:18:52,011 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into fe40919e14984957b94de9b6bd42f666(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:52,011 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:52,012 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473131935; duration=0sec 2024-12-06T08:18:52,014 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:52,014 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:52,014 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:52,016 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:52,016 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:52,016 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:52,017 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cf0b2f24026e497999dce1a7d0585ab3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/70fb068973a341429e256e31b19412f7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/1da5131b2865451dbb0066114a5937aa] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=36.9 K 2024-12-06T08:18:52,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741931_1107 (size=12301) 2024-12-06T08:18:52,017 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting cf0b2f24026e497999dce1a7d0585ab3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733473129586 2024-12-06T08:18:52,020 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 70fb068973a341429e256e31b19412f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1733473129708 2024-12-06T08:18:52,020 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=445 
(bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/6c4de9d678ef4846808ba06b42faae50 2024-12-06T08:18:52,023 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da5131b2865451dbb0066114a5937aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733473130360 2024-12-06T08:18:52,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f8ab5b31451148c1840d8054b25d7de9 is 50, key is test_row_0/B:col10/1733473131035/Put/seqid=0 2024-12-06T08:18:52,040 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:52,041 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/0c172fa6492f4117b8ab7923aaa5058d is 50, key is test_row_0/C:col10/1733473130360/Put/seqid=0 2024-12-06T08:18:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741932_1108 (size=12301) 2024-12-06T08:18:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741933_1109 (size=13323) 2024-12-06T08:18:52,098 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/0c172fa6492f4117b8ab7923aaa5058d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/0c172fa6492f4117b8ab7923aaa5058d 2024-12-06T08:18:52,106 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 0c172fa6492f4117b8ab7923aaa5058d(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:52,106 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:52,106 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473131935; duration=0sec 2024-12-06T08:18:52,106 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:52,106 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:52,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:52,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:52,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473192199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473192201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473192202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473192309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473192309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473192310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733473192330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,332 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8188 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:18:52,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41076 deadline: 1733473192352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,353 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:18:52,400 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/aa2d96e29c49402e9bd6c3d6f8d86096 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/aa2d96e29c49402e9bd6c3d6f8d86096 2024-12-06T08:18:52,407 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into aa2d96e29c49402e9bd6c3d6f8d86096(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:52,407 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:52,408 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473131935; duration=0sec 2024-12-06T08:18:52,408 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:52,408 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:52,483 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f8ab5b31451148c1840d8054b25d7de9 2024-12-06T08:18:52,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/385b73d27aae4311b41d361e6b43d338 is 50, key is test_row_0/C:col10/1733473131035/Put/seqid=0 2024-12-06T08:18:52,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741934_1110 (size=12301) 2024-12-06T08:18:52,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473192511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,512 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/385b73d27aae4311b41d361e6b43d338 2024-12-06T08:18:52,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473192512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473192513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/6c4de9d678ef4846808ba06b42faae50 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6c4de9d678ef4846808ba06b42faae50 2024-12-06T08:18:52,530 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6c4de9d678ef4846808ba06b42faae50, entries=150, sequenceid=445, filesize=12.0 K 2024-12-06T08:18:52,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/f8ab5b31451148c1840d8054b25d7de9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f8ab5b31451148c1840d8054b25d7de9 2024-12-06T08:18:52,542 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f8ab5b31451148c1840d8054b25d7de9, entries=150, sequenceid=445, filesize=12.0 K 2024-12-06T08:18:52,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/385b73d27aae4311b41d361e6b43d338 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/385b73d27aae4311b41d361e6b43d338 2024-12-06T08:18:52,556 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/385b73d27aae4311b41d361e6b43d338, entries=150, sequenceid=445, filesize=12.0 K 2024-12-06T08:18:52,557 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 27b39638a4e82980fa51c6694c44d0ad in 604ms, sequenceid=445, compaction requested=false 2024-12-06T08:18:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-06T08:18:52,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-06T08:18:52,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-06T08:18:52,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6940 sec 2024-12-06T08:18:52,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.7020 sec 2024-12-06T08:18:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:52,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-06T08:18:52,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:52,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:52,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:52,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:52,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:52,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:52,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473192822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/375fa80d86a9435e947269b0ac8e8cda is 50, key is test_row_0/A:col10/1733473132817/Put/seqid=0 2024-12-06T08:18:52,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473192823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473192828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741935_1111 (size=12301) 2024-12-06T08:18:52,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473192926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473192930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473192930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:52,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-06T08:18:52,969 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-06T08:18:52,971 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:18:52,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-06T08:18:52,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T08:18:52,974 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:18:52,975 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:18:52,975 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:18:53,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=28 2024-12-06T08:18:53,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-06T08:18:53,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:53,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
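The FLUSH operations above (procId 26, then 28) are driven from the client through the HBase Admin API: the HBaseAdmin$TableFuture line is the test thread waiting on the table-level FlushTableProcedure, which fans out into a per-region FlushRegionProcedure (pid=29) dispatched to the region server. Below is a minimal sketch of how such a flush is typically requested; it assumes a running cluster and the existing TestAcidGuarantees table, and the connection setup is illustrative rather than taken from the test code itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a table flush on the master (the "Client=jenkins ... flush TestAcidGuarantees"
      // line above) and blocks until the procedure completes, which is what the
      // HBaseAdmin$TableFuture "Operation: FLUSH ... completed" log line reports.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

When the region is already flushing because of memstore pressure, the remote FlushRegionCallable fails with the IOException shown above and the master simply re-dispatches pid=29 (the repeated "Executing remote procedure" lines that follow) until the in-flight flush finishes.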
2024-12-06T08:18:53,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473193131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473193133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473193138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/375fa80d86a9435e947269b0ac8e8cda 2024-12-06T08:18:53,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c4fc1af33da04770964a0831fc25fd49 is 50, key is test_row_0/B:col10/1733473132817/Put/seqid=0 2024-12-06T08:18:53,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741936_1112 (size=12301) 2024-12-06T08:18:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T08:18:53,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c4fc1af33da04770964a0831fc25fd49 2024-12-06T08:18:53,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-06T08:18:53,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:53,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
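The repeated RegionTooBusyException ("Over memstore limit=512.0 K") is raised by HRegion.checkResources: once a region's memstore grows past its blocking threshold, further mutations are rejected until a flush brings it back down. That threshold is the configured per-region flush size multiplied by a block multiplier. The snippet below only illustrates the two properties involved; the concrete values are an assumption chosen to reproduce the 512 K limit seen in this log, not the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size at which a flush is requested.
    // 128 KB is an assumed test-sized value; the production default is 128 MB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier, i.e. 128 KB * 4 = 512 KB under this assumption.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit: " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}

With the shipped defaults (128 MB flush size, multiplier 4) the same check would only block writes at 512 MB per region; the much smaller limit in this run is what makes the TestAcidGuarantees write load trip it so often.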
2024-12-06T08:18:53,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/25fe8aaa64fa420a812580a994de85ed is 50, key is test_row_0/C:col10/1733473132817/Put/seqid=0 2024-12-06T08:18:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741937_1113 (size=12301) 2024-12-06T08:18:53,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473193436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473193440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-06T08:18:53,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:53,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:53,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
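On the client side these rejections are retried: the same connections (172.17.0.2:41072, 41086 and 41102) reappear with increasing callIds and keep being turned away until the flush at the end of this section frees memstore space. The stock HBase client performs this retrying internally with configurable pauses; the loop below is only an illustrative sketch of that behaviour for a single Put (row, family and qualifier are taken from the log, while the retry count and pause are assumptions), not the client's actual retry implementation.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  // Retries a single Put with a growing pause while the region reports it is too busy.
  static void putWithRetry(Connection connection) throws Exception {
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; back off and let the flush catch up.
          Thread.sleep(100L * attempt);
        }
      }
      throw new java.io.IOException("region still busy after 10 attempts");
    }
  }
}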
2024-12-06T08:18:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473193445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T08:18:53,604 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-06T08:18:53,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:53,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:18:53,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:18:53,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/25fe8aaa64fa420a812580a994de85ed 2024-12-06T08:18:53,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/375fa80d86a9435e947269b0ac8e8cda as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/375fa80d86a9435e947269b0ac8e8cda 2024-12-06T08:18:53,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/375fa80d86a9435e947269b0ac8e8cda, entries=150, sequenceid=477, filesize=12.0 K 2024-12-06T08:18:53,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/c4fc1af33da04770964a0831fc25fd49 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c4fc1af33da04770964a0831fc25fd49 2024-12-06T08:18:53,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c4fc1af33da04770964a0831fc25fd49, entries=150, sequenceid=477, filesize=12.0 K 2024-12-06T08:18:53,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/25fe8aaa64fa420a812580a994de85ed as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/25fe8aaa64fa420a812580a994de85ed 2024-12-06T08:18:53,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/25fe8aaa64fa420a812580a994de85ed, entries=150, sequenceid=477, filesize=12.0 K 2024-12-06T08:18:53,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 27b39638a4e82980fa51c6694c44d0ad in 916ms, sequenceid=477, compaction requested=true 2024-12-06T08:18:53,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:53,736 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:53,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:53,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:53,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:53,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:53,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:53,736 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:53,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:53,748 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:53,748 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:53,748 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:53,748 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/aa2d96e29c49402e9bd6c3d6f8d86096, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6c4de9d678ef4846808ba06b42faae50, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/375fa80d86a9435e947269b0ac8e8cda] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=37.0 K 2024-12-06T08:18:53,748 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:53,748 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:53,748 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,749 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fe40919e14984957b94de9b6bd42f666, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f8ab5b31451148c1840d8054b25d7de9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c4fc1af33da04770964a0831fc25fd49] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=37.0 K 2024-12-06T08:18:53,750 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fe40919e14984957b94de9b6bd42f666, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733473130360 2024-12-06T08:18:53,750 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa2d96e29c49402e9bd6c3d6f8d86096, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733473130360 2024-12-06T08:18:53,751 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f8ab5b31451148c1840d8054b25d7de9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733473130998 2024-12-06T08:18:53,751 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c4de9d678ef4846808ba06b42faae50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733473130998 2024-12-06T08:18:53,752 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 375fa80d86a9435e947269b0ac8e8cda, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733473132196 2024-12-06T08:18:53,752 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c4fc1af33da04770964a0831fc25fd49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733473132196 2024-12-06T08:18:53,757 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-06T08:18:53,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,758 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-06T08:18:53,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:53,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:53,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:53,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:53,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:53,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:53,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/95f9fd3bba2d4b24a1764c5ccece4195 is 50, key is test_row_0/A:col10/1733473132823/Put/seqid=0 2024-12-06T08:18:53,770 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:53,771 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/4a4b45945af54bd396ed75bbd4c68339 is 50, key is test_row_0/B:col10/1733473132817/Put/seqid=0 2024-12-06T08:18:53,784 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#101 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:53,785 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c71e71a19a0a4de59c19f9d1afa54f52 is 50, key is test_row_0/A:col10/1733473132817/Put/seqid=0 2024-12-06T08:18:53,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741938_1114 (size=12301) 2024-12-06T08:18:53,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741939_1115 (size=13425) 2024-12-06T08:18:53,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741940_1116 (size=13425) 2024-12-06T08:18:53,811 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c71e71a19a0a4de59c19f9d1afa54f52 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c71e71a19a0a4de59c19f9d1afa54f52 2024-12-06T08:18:53,818 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into c71e71a19a0a4de59c19f9d1afa54f52(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:53,819 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:53,819 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473133736; duration=0sec 2024-12-06T08:18:53,819 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:53,819 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:53,819 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:53,822 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:53,822 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:53,822 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:53,822 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/0c172fa6492f4117b8ab7923aaa5058d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/385b73d27aae4311b41d361e6b43d338, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/25fe8aaa64fa420a812580a994de85ed] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=37.0 K 2024-12-06T08:18:53,823 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c172fa6492f4117b8ab7923aaa5058d, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1733473130360 2024-12-06T08:18:53,823 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 385b73d27aae4311b41d361e6b43d338, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733473130998 2024-12-06T08:18:53,824 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25fe8aaa64fa420a812580a994de85ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733473132196 2024-12-06T08:18:53,846 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#C#compaction#102 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:53,847 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/051a5417a399412a8a139b4e344e2a9a is 50, key is test_row_0/C:col10/1733473132817/Put/seqid=0 2024-12-06T08:18:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741941_1117 (size=13425) 2024-12-06T08:18:53,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. as already flushing 2024-12-06T08:18:53,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473193978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473193978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:53,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:53,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473193979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T08:18:54,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473194082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473194082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473194083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,194 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/95f9fd3bba2d4b24a1764c5ccece4195 2024-12-06T08:18:54,201 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/4a4b45945af54bd396ed75bbd4c68339 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4a4b45945af54bd396ed75bbd4c68339 2024-12-06T08:18:54,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/8ba5e5ea38bd4ffb938727a3bc190516 is 50, key is test_row_0/B:col10/1733473132823/Put/seqid=0 2024-12-06T08:18:54,212 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into 4a4b45945af54bd396ed75bbd4c68339(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:54,212 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:54,212 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473133736; duration=0sec 2024-12-06T08:18:54,212 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:54,212 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:54,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741942_1118 (size=12301) 2024-12-06T08:18:54,273 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/051a5417a399412a8a139b4e344e2a9a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/051a5417a399412a8a139b4e344e2a9a 2024-12-06T08:18:54,280 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into 051a5417a399412a8a139b4e344e2a9a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:54,280 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:54,280 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473133736; duration=0sec 2024-12-06T08:18:54,281 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:54,281 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:54,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473194286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473194287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473194286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473194591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473194591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:54,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473194592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:54,625 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/8ba5e5ea38bd4ffb938727a3bc190516 2024-12-06T08:18:54,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/8177ac51982a45f1a4ac4dbdf8a194aa is 50, key is test_row_0/C:col10/1733473132823/Put/seqid=0 2024-12-06T08:18:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741943_1119 (size=12301) 2024-12-06T08:18:55,040 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/8177ac51982a45f1a4ac4dbdf8a194aa 2024-12-06T08:18:55,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/95f9fd3bba2d4b24a1764c5ccece4195 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/95f9fd3bba2d4b24a1764c5ccece4195 2024-12-06T08:18:55,056 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/95f9fd3bba2d4b24a1764c5ccece4195, entries=150, sequenceid=484, filesize=12.0 K 2024-12-06T08:18:55,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/8ba5e5ea38bd4ffb938727a3bc190516 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/8ba5e5ea38bd4ffb938727a3bc190516 2024-12-06T08:18:55,063 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/8ba5e5ea38bd4ffb938727a3bc190516, entries=150, sequenceid=484, filesize=12.0 K 2024-12-06T08:18:55,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/8177ac51982a45f1a4ac4dbdf8a194aa as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/8177ac51982a45f1a4ac4dbdf8a194aa 2024-12-06T08:18:55,070 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/8177ac51982a45f1a4ac4dbdf8a194aa, entries=150, sequenceid=484, filesize=12.0 K 2024-12-06T08:18:55,072 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 27b39638a4e82980fa51c6694c44d0ad in 1314ms, sequenceid=484, compaction requested=false 2024-12-06T08:18:55,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:55,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:55,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-06T08:18:55,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-06T08:18:55,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-06T08:18:55,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0990 sec 2024-12-06T08:18:55,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T08:18:55,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.1080 sec 2024-12-06T08:18:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:55,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-06T08:18:55,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:55,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:55,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:55,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:55,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:55,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:55,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/2e61ed19776b410a82daddb47f9c1858 is 50, key is test_row_0/A:col10/1733473133977/Put/seqid=0 2024-12-06T08:18:55,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473195105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473195106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473195105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741944_1120 (size=14741) 2024-12-06T08:18:55,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473195211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473195212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473195213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473195415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473195416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473195415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/2e61ed19776b410a82daddb47f9c1858 2024-12-06T08:18:55,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fc4712a89566431abe5f7ad5657c4738 is 50, key is test_row_0/B:col10/1733473133977/Put/seqid=0 2024-12-06T08:18:55,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741945_1121 (size=12301) 2024-12-06T08:18:55,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473195719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473195720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473195721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:55,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fc4712a89566431abe5f7ad5657c4738 2024-12-06T08:18:55,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/97dfdd273e9c42a1a68f0c032a3f3ec5 is 50, key is test_row_0/C:col10/1733473133977/Put/seqid=0 2024-12-06T08:18:55,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741946_1122 (size=12301) 2024-12-06T08:18:56,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:56,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1733473196223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:56,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:56,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41086 deadline: 1733473196224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:56,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:18:56,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41072 deadline: 1733473196225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:18:56,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/97dfdd273e9c42a1a68f0c032a3f3ec5 2024-12-06T08:18:56,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/2e61ed19776b410a82daddb47f9c1858 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/2e61ed19776b410a82daddb47f9c1858 2024-12-06T08:18:56,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/2e61ed19776b410a82daddb47f9c1858, entries=200, sequenceid=517, filesize=14.4 K 2024-12-06T08:18:56,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/fc4712a89566431abe5f7ad5657c4738 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fc4712a89566431abe5f7ad5657c4738 2024-12-06T08:18:56,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fc4712a89566431abe5f7ad5657c4738, entries=150, sequenceid=517, filesize=12.0 K 2024-12-06T08:18:56,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/97dfdd273e9c42a1a68f0c032a3f3ec5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/97dfdd273e9c42a1a68f0c032a3f3ec5 2024-12-06T08:18:56,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/97dfdd273e9c42a1a68f0c032a3f3ec5, entries=150, sequenceid=517, filesize=12.0 K 2024-12-06T08:18:56,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=33.54 KB/34350 for 27b39638a4e82980fa51c6694c44d0ad in 1272ms, sequenceid=517, compaction requested=true 2024-12-06T08:18:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:18:56,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:56,373 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:56,373 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:56,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:18:56,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:56,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 27b39638a4e82980fa51c6694c44d0ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:18:56,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:56,374 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:56,374 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/A is initiating minor compaction (all files) 2024-12-06T08:18:56,374 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/A in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
2024-12-06T08:18:56,374 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c71e71a19a0a4de59c19f9d1afa54f52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/95f9fd3bba2d4b24a1764c5ccece4195, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/2e61ed19776b410a82daddb47f9c1858] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=39.5 K 2024-12-06T08:18:56,375 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:56,375 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/B is initiating minor compaction (all files) 2024-12-06T08:18:56,375 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/B in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:56,375 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4a4b45945af54bd396ed75bbd4c68339, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/8ba5e5ea38bd4ffb938727a3bc190516, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fc4712a89566431abe5f7ad5657c4738] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=37.1 K 2024-12-06T08:18:56,375 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c71e71a19a0a4de59c19f9d1afa54f52, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733473132196 2024-12-06T08:18:56,375 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a4b45945af54bd396ed75bbd4c68339, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733473132196 2024-12-06T08:18:56,376 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95f9fd3bba2d4b24a1764c5ccece4195, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733473132821 2024-12-06T08:18:56,376 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ba5e5ea38bd4ffb938727a3bc190516, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733473132821 2024-12-06T08:18:56,376 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 2e61ed19776b410a82daddb47f9c1858, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1733473133972 2024-12-06T08:18:56,376 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fc4712a89566431abe5f7ad5657c4738, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1733473133972 2024-12-06T08:18:56,399 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#A#compaction#108 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:56,399 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7db5f1fa9dd740ddbc8748746af174a9 is 50, key is test_row_0/A:col10/1733473133977/Put/seqid=0 2024-12-06T08:18:56,400 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 27b39638a4e82980fa51c6694c44d0ad#B#compaction#109 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:56,400 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/815faef7e5ed486284bbb5420d02fd1b is 50, key is test_row_0/B:col10/1733473133977/Put/seqid=0 2024-12-06T08:18:56,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741947_1123 (size=13527) 2024-12-06T08:18:56,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741948_1124 (size=13527) 2024-12-06T08:18:56,422 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/815faef7e5ed486284bbb5420d02fd1b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/815faef7e5ed486284bbb5420d02fd1b 2024-12-06T08:18:56,430 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/B of 27b39638a4e82980fa51c6694c44d0ad into 815faef7e5ed486284bbb5420d02fd1b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:56,431 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:56,431 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/B, priority=13, startTime=1733473136373; duration=0sec 2024-12-06T08:18:56,431 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:18:56,431 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:B 2024-12-06T08:18:56,431 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:18:56,434 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:18:56,434 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 27b39638a4e82980fa51c6694c44d0ad/C is initiating minor compaction (all files) 2024-12-06T08:18:56,434 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 27b39638a4e82980fa51c6694c44d0ad/C in TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:18:56,434 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/051a5417a399412a8a139b4e344e2a9a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/8177ac51982a45f1a4ac4dbdf8a194aa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/97dfdd273e9c42a1a68f0c032a3f3ec5] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp, totalSize=37.1 K 2024-12-06T08:18:56,435 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 051a5417a399412a8a139b4e344e2a9a, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733473132196 2024-12-06T08:18:56,435 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 8177ac51982a45f1a4ac4dbdf8a194aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733473132821 2024-12-06T08:18:56,435 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 97dfdd273e9c42a1a68f0c032a3f3ec5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1733473133972 2024-12-06T08:18:56,459 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
27b39638a4e82980fa51c6694c44d0ad#C#compaction#110 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:18:56,460 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/b4daa98c7bf14eaaad019b131a20fe52 is 50, key is test_row_0/C:col10/1733473133977/Put/seqid=0 2024-12-06T08:18:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741949_1125 (size=13527) 2024-12-06T08:18:56,485 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/b4daa98c7bf14eaaad019b131a20fe52 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b4daa98c7bf14eaaad019b131a20fe52 2024-12-06T08:18:56,493 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/C of 27b39638a4e82980fa51c6694c44d0ad into b4daa98c7bf14eaaad019b131a20fe52(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:18:56,493 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:56,493 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/C, priority=13, startTime=1733473136373; duration=0sec 2024-12-06T08:18:56,493 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:56,493 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:C 2024-12-06T08:18:56,825 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/7db5f1fa9dd740ddbc8748746af174a9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7db5f1fa9dd740ddbc8748746af174a9 2024-12-06T08:18:56,831 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 27b39638a4e82980fa51c6694c44d0ad/A of 27b39638a4e82980fa51c6694c44d0ad into 7db5f1fa9dd740ddbc8748746af174a9(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:18:56,831 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:56,831 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad., storeName=27b39638a4e82980fa51c6694c44d0ad/A, priority=13, startTime=1733473136372; duration=0sec 2024-12-06T08:18:56,832 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:18:56,832 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 27b39638a4e82980fa51c6694c44d0ad:A 2024-12-06T08:18:57,001 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:65195 2024-12-06T08:18:57,001 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b5f27aa to 127.0.0.1:65195 2024-12-06T08:18:57,001 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:65195 2024-12-06T08:18:57,001 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,001 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,001 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,003 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:65195 2024-12-06T08:18:57,003 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-06T08:18:57,080 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-06T08:18:57,226 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1324ee83 to 127.0.0.1:65195 2024-12-06T08:18:57,226 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:18:57,236 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x048068a5 to 127.0.0.1:65195 2024-12-06T08:18:57,236 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:18:57,236 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:65195 2024-12-06T08:18:57,236 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:18:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:18:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:57,236 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:18:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:18:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:18:57,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c8cf594e2dc7476b818364c404db2df4 is 50, key is test_row_0/A:col10/1733473137234/Put/seqid=0 2024-12-06T08:18:57,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741950_1126 (size=12301) 2024-12-06T08:18:57,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c8cf594e2dc7476b818364c404db2df4 2024-12-06T08:18:57,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/927d4cf44a574897ae8ccf9e6d871a65 is 50, key is test_row_0/B:col10/1733473137234/Put/seqid=0 2024-12-06T08:18:57,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741951_1127 (size=12301) 2024-12-06T08:18:58,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/927d4cf44a574897ae8ccf9e6d871a65 2024-12-06T08:18:58,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/04cba9e80b494c2c8a3d02e82f02f2ee is 50, key is test_row_0/C:col10/1733473137234/Put/seqid=0 2024-12-06T08:18:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741952_1128 (size=12301) 2024-12-06T08:18:58,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/04cba9e80b494c2c8a3d02e82f02f2ee 2024-12-06T08:18:58,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/c8cf594e2dc7476b818364c404db2df4 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c8cf594e2dc7476b818364c404db2df4 2024-12-06T08:18:58,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c8cf594e2dc7476b818364c404db2df4, entries=150, sequenceid=531, filesize=12.0 K 2024-12-06T08:18:58,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/927d4cf44a574897ae8ccf9e6d871a65 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/927d4cf44a574897ae8ccf9e6d871a65 2024-12-06T08:18:58,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/927d4cf44a574897ae8ccf9e6d871a65, entries=150, sequenceid=531, filesize=12.0 K 2024-12-06T08:18:58,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/04cba9e80b494c2c8a3d02e82f02f2ee as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/04cba9e80b494c2c8a3d02e82f02f2ee 2024-12-06T08:18:58,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/04cba9e80b494c2c8a3d02e82f02f2ee, entries=150, sequenceid=531, filesize=12.0 K 2024-12-06T08:18:58,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 27b39638a4e82980fa51c6694c44d0ad in 1263ms, sequenceid=531, compaction requested=false 2024-12-06T08:18:58,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:18:59,737 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:19:02,375 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:65195 2024-12-06T08:19:02,375 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:02,392 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53b8a93e to 127.0.0.1:65195 2024-12-06T08:19:02,392 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:02,392 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 112 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5018 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5158 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2270 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6803 rows 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2258 2024-12-06T08:19:02,393 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6767 rows 2024-12-06T08:19:02,393 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:19:02,393 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63607639 to 127.0.0.1:65195 2024-12-06T08:19:02,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:02,403 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T08:19:02,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T08:19:02,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:02,416 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473142416"}]},"ts":"1733473142416"} 2024-12-06T08:19:02,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T08:19:02,418 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T08:19:02,420 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T08:19:02,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:19:02,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, UNASSIGN}] 2024-12-06T08:19:02,427 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, UNASSIGN 2024-12-06T08:19:02,428 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=27b39638a4e82980fa51c6694c44d0ad, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:02,429 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:19:02,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:19:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T08:19:02,586 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:02,587 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:19:02,587 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:19:02,588 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 27b39638a4e82980fa51c6694c44d0ad, disabling compactions & flushes 2024-12-06T08:19:02,588 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:19:02,588 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 2024-12-06T08:19:02,588 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. after waiting 0 ms 2024-12-06T08:19:02,588 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
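The records above show the client-side Admin call ("Started disable of TestAcidGuarantees") fanning out on the master into DisableTableProcedure (pid=30), CloseTableRegionsProcedure (pid=31), TransitRegionStateProcedure (pid=32) and CloseRegionProcedure (pid=33) before the region server starts closing 27b39638a4e82980fa51c6694c44d0ad. For reference, a minimal client sketch that triggers the same procedure chain through the public HBase Admin API could look as follows; the ZooKeeper quorum value is a placeholder and is not taken from this run, which drives an in-process mini cluster instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum; the test log above uses a mini cluster, not a standalone deployment.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");

        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a DisableTableProcedure on the master, which in turn schedules the
            // close/transit procedures for each region, as in the pid=30..33 records above.
            if (!admin.isTableDisabled(table)) {
                admin.disableTable(table);
            }
        }
    }
}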
2024-12-06T08:19:02,588 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 27b39638a4e82980fa51c6694c44d0ad 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-06T08:19:02,589 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=A 2024-12-06T08:19:02,589 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:02,589 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=B 2024-12-06T08:19:02,589 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:02,589 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 27b39638a4e82980fa51c6694c44d0ad, store=C 2024-12-06T08:19:02,589 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:02,593 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/04aaea443b7f4a0c8e8f4f7cb904a5be is 50, key is test_row_1/A:col10/1733473142390/Put/seqid=0 2024-12-06T08:19:02,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741953_1129 (size=9857) 2024-12-06T08:19:02,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T08:19:02,998 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/04aaea443b7f4a0c8e8f4f7cb904a5be 2024-12-06T08:19:03,008 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/ef9e3a9517c242ceb5bf5273a489d133 is 50, key is test_row_1/B:col10/1733473142390/Put/seqid=0 2024-12-06T08:19:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741954_1130 (size=9857) 2024-12-06T08:19:03,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T08:19:03,417 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 
{event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/ef9e3a9517c242ceb5bf5273a489d133 2024-12-06T08:19:03,425 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/2733f8184c3247c3a8f38bb4c1a2cde5 is 50, key is test_row_1/C:col10/1733473142390/Put/seqid=0 2024-12-06T08:19:03,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741955_1131 (size=9857) 2024-12-06T08:19:03,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T08:19:03,831 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/2733f8184c3247c3a8f38bb4c1a2cde5 2024-12-06T08:19:03,840 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/A/04aaea443b7f4a0c8e8f4f7cb904a5be as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/04aaea443b7f4a0c8e8f4f7cb904a5be 2024-12-06T08:19:03,848 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/04aaea443b7f4a0c8e8f4f7cb904a5be, entries=100, sequenceid=536, filesize=9.6 K 2024-12-06T08:19:03,849 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/B/ef9e3a9517c242ceb5bf5273a489d133 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/ef9e3a9517c242ceb5bf5273a489d133 2024-12-06T08:19:03,854 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/ef9e3a9517c242ceb5bf5273a489d133, entries=100, sequenceid=536, filesize=9.6 K 2024-12-06T08:19:03,855 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/.tmp/C/2733f8184c3247c3a8f38bb4c1a2cde5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2733f8184c3247c3a8f38bb4c1a2cde5 2024-12-06T08:19:03,860 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2733f8184c3247c3a8f38bb4c1a2cde5, entries=100, sequenceid=536, filesize=9.6 K 2024-12-06T08:19:03,861 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 27b39638a4e82980fa51c6694c44d0ad in 1273ms, sequenceid=536, compaction requested=true 2024-12-06T08:19:03,864 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/92a40c9b3e584983a64d78e2a0549523, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/b5b1735e7b314978947e059aec313068, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/577eb67e69aa4d1795745a5141183895, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/56bbd8bb5fee4cdba0f7cdf26fa562b5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bd77b7f27cae417cb6e3bbe0766fd31f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d00d063c68b742f2ac036e903e3f0719, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c48eb88fb3974965979ffad62b3233f7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/5ecf6fe96eac44228ca6b44bd9f07dbd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9c594a46eaca4c89a29a9d383f2a2ac1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/62609bbe693b4a81ac379173da05d75a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/cc62b5ec5e59450cb67679fc9fa8fb1a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7043c2d916f5452582aee080fb0efdce, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/148a8f623a214f69a9aaae68a239d957, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d1d1bac9bb6c45acaf6a4cc6d692e962, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/f41e4968ad5b45619f115ef89eee8a93, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/4b6a43d571dc48e9866f713139a0e7b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/94f1e863f7244639937884b410a5a690, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6dc08bdfade64af1a5cbbc4ac267c845, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/478a88e539984ae09fad60a300454b13, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/1f4662d905ab46e0964bb8fc47fb5c85, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/dbd33bc4a8034ed4840ee5825937789a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bc8f611e34c34ecdaceefc1390a08994, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/30d6f2667bf84e38b0bddfb152e14641, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/759a315e44974a85a656050771b6057e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/05d8403d9f3f4316b2c0de647ec92977, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9262a91827e440c88819ad6f530e4148, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/adee3a4c153146ceabbb731777b18343, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7f313d72156a4410bdcc69a02cb2393f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/aa2d96e29c49402e9bd6c3d6f8d86096, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/06f888581cfa4d5fabb479d12cf43fdc, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6c4de9d678ef4846808ba06b42faae50, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c71e71a19a0a4de59c19f9d1afa54f52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/375fa80d86a9435e947269b0ac8e8cda, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/95f9fd3bba2d4b24a1764c5ccece4195, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/2e61ed19776b410a82daddb47f9c1858] to archive 2024-12-06T08:19:03,868 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:19:03,881 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/92a40c9b3e584983a64d78e2a0549523 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/92a40c9b3e584983a64d78e2a0549523 2024-12-06T08:19:03,883 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/b5b1735e7b314978947e059aec313068 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/b5b1735e7b314978947e059aec313068 2024-12-06T08:19:03,884 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/577eb67e69aa4d1795745a5141183895 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/577eb67e69aa4d1795745a5141183895 2024-12-06T08:19:03,886 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/56bbd8bb5fee4cdba0f7cdf26fa562b5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/56bbd8bb5fee4cdba0f7cdf26fa562b5 2024-12-06T08:19:03,887 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bd77b7f27cae417cb6e3bbe0766fd31f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bd77b7f27cae417cb6e3bbe0766fd31f 2024-12-06T08:19:03,889 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d00d063c68b742f2ac036e903e3f0719 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d00d063c68b742f2ac036e903e3f0719 2024-12-06T08:19:03,890 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c48eb88fb3974965979ffad62b3233f7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c48eb88fb3974965979ffad62b3233f7 2024-12-06T08:19:03,894 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/5ecf6fe96eac44228ca6b44bd9f07dbd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/5ecf6fe96eac44228ca6b44bd9f07dbd 2024-12-06T08:19:03,898 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9c594a46eaca4c89a29a9d383f2a2ac1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9c594a46eaca4c89a29a9d383f2a2ac1 2024-12-06T08:19:03,901 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/62609bbe693b4a81ac379173da05d75a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/62609bbe693b4a81ac379173da05d75a 2024-12-06T08:19:03,903 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/cc62b5ec5e59450cb67679fc9fa8fb1a to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/cc62b5ec5e59450cb67679fc9fa8fb1a 2024-12-06T08:19:03,904 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7043c2d916f5452582aee080fb0efdce to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7043c2d916f5452582aee080fb0efdce 2024-12-06T08:19:03,906 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/148a8f623a214f69a9aaae68a239d957 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/148a8f623a214f69a9aaae68a239d957 2024-12-06T08:19:03,907 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d1d1bac9bb6c45acaf6a4cc6d692e962 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/d1d1bac9bb6c45acaf6a4cc6d692e962 2024-12-06T08:19:03,908 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/f41e4968ad5b45619f115ef89eee8a93 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/f41e4968ad5b45619f115ef89eee8a93 2024-12-06T08:19:03,910 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/4b6a43d571dc48e9866f713139a0e7b9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/4b6a43d571dc48e9866f713139a0e7b9 2024-12-06T08:19:03,911 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/94f1e863f7244639937884b410a5a690 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/94f1e863f7244639937884b410a5a690 2024-12-06T08:19:03,913 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6dc08bdfade64af1a5cbbc4ac267c845 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6dc08bdfade64af1a5cbbc4ac267c845 2024-12-06T08:19:03,914 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/478a88e539984ae09fad60a300454b13 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/478a88e539984ae09fad60a300454b13 2024-12-06T08:19:03,916 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/1f4662d905ab46e0964bb8fc47fb5c85 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/1f4662d905ab46e0964bb8fc47fb5c85 2024-12-06T08:19:03,917 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/e1c715fdc7bb4cc78e9cc9009b4ad4ea 2024-12-06T08:19:03,918 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/dbd33bc4a8034ed4840ee5825937789a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/dbd33bc4a8034ed4840ee5825937789a 2024-12-06T08:19:03,920 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bc8f611e34c34ecdaceefc1390a08994 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/bc8f611e34c34ecdaceefc1390a08994 2024-12-06T08:19:03,921 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/30d6f2667bf84e38b0bddfb152e14641 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/30d6f2667bf84e38b0bddfb152e14641 2024-12-06T08:19:03,922 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/759a315e44974a85a656050771b6057e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/759a315e44974a85a656050771b6057e 2024-12-06T08:19:03,924 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/05d8403d9f3f4316b2c0de647ec92977 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/05d8403d9f3f4316b2c0de647ec92977 2024-12-06T08:19:03,925 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9262a91827e440c88819ad6f530e4148 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/9262a91827e440c88819ad6f530e4148 2024-12-06T08:19:03,927 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/adee3a4c153146ceabbb731777b18343 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/adee3a4c153146ceabbb731777b18343 2024-12-06T08:19:03,928 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7f313d72156a4410bdcc69a02cb2393f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7f313d72156a4410bdcc69a02cb2393f 2024-12-06T08:19:03,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/aa2d96e29c49402e9bd6c3d6f8d86096 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/aa2d96e29c49402e9bd6c3d6f8d86096 2024-12-06T08:19:03,931 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/06f888581cfa4d5fabb479d12cf43fdc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/06f888581cfa4d5fabb479d12cf43fdc 2024-12-06T08:19:03,932 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6c4de9d678ef4846808ba06b42faae50 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/6c4de9d678ef4846808ba06b42faae50 2024-12-06T08:19:03,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c71e71a19a0a4de59c19f9d1afa54f52 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c71e71a19a0a4de59c19f9d1afa54f52 2024-12-06T08:19:03,934 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/375fa80d86a9435e947269b0ac8e8cda to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/375fa80d86a9435e947269b0ac8e8cda 2024-12-06T08:19:03,936 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/95f9fd3bba2d4b24a1764c5ccece4195 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/95f9fd3bba2d4b24a1764c5ccece4195 2024-12-06T08:19:03,937 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/2e61ed19776b410a82daddb47f9c1858 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/2e61ed19776b410a82daddb47f9c1858 2024-12-06T08:19:03,951 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/0927a4e4d7d94c53a3ccf70a16a3e7a0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/81cef35dd28f48c1a15f8e3295301fb7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c0212401db2044c1b29ec99531172de7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c611b7ef7c354a7f9b873ac597bbbfde, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/90b500a6755f47509a1f158cdffe2726, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d0feade4188a4537bec767349718f8c0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0fe94aa1e84480b90d429e7ec3920a2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fb911496ad844ae186b4667cea554b2b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/9d66f6f7d58242b0ae9112d6159cf88f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e196f676be4543288077eaaa86c824ca, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/7d19aa1198434ad5a2236dbbb511be18, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/1389540a335447a886ee96fc6a1f596b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/5230bbf3c27f4827947728e42dd483d6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4f20f9d4e940436ebe0ff73f3b4912dd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/bc13c50ca49c4e10816e8fff06fcd7e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e5d2e54b0c274c83bf28c878ea42efb5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a6c8806c5e1a42d7bd6d551ebbf73da2, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/016c080e592b47cebde06be786a2cc08, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/46808a14ac5c4c8bb4b7d68fc1fabf2c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/abc52584142444f3834b42e6864ca9ee, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/dee2c5be9d5d43d28e782d67dae1eea9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0bc1044923a479e9d08dc15bc98936b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f071cf2fef1340ee90d13a3524dbeb29, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/63564774edc844dc91ccf335c812774a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a84018974bda4530bc3951218f32ef58, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/347163e905054d3986f713b32b54f16a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d401bd95689f4e689a834a1137134375, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/78d34f6ac2124bfeab7fb5a995b51189, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fe40919e14984957b94de9b6bd42f666, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/10c1682554a94c2faeec0248d20b7ca3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f8ab5b31451148c1840d8054b25d7de9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4a4b45945af54bd396ed75bbd4c68339, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c4fc1af33da04770964a0831fc25fd49, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/8ba5e5ea38bd4ffb938727a3bc190516, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fc4712a89566431abe5f7ad5657c4738] to archive 2024-12-06T08:19:03,953 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
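The HFileArchiver records in this stretch move each compacted store file from the region's data directory to the matching location under archive/. A small illustrative sketch of that path mapping, derived only from the source/destination pairs logged here (it is not the actual HFileArchiver implementation), is:

import java.net.URI;

public class ArchivePathSketch {
    // Derives the archive destination reported above by inserting "/archive" in front of the
    // first "/data/" path component; this mirrors the logged source -> destination pairs.
    static String toArchivePath(String storeFilePath) {
        URI uri = URI.create(storeFilePath);
        String path = uri.getPath();
        int idx = path.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("not under a /data/ root: " + path);
        }
        String archived = path.substring(0, idx) + "/archive" + path.substring(idx);
        return uri.getScheme() + "://" + uri.getAuthority() + archived;
    }

    public static void main(String[] args) {
        // One of the B-family files archived in the records that follow.
        System.out.println(toArchivePath(
            "hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156"
            + "/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad"
            + "/B/0927a4e4d7d94c53a3ccf70a16a3e7a0"));
    }
}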
2024-12-06T08:19:03,956 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/0927a4e4d7d94c53a3ccf70a16a3e7a0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/0927a4e4d7d94c53a3ccf70a16a3e7a0 2024-12-06T08:19:03,961 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/81cef35dd28f48c1a15f8e3295301fb7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/81cef35dd28f48c1a15f8e3295301fb7 2024-12-06T08:19:03,963 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f6bbcffa144b4d1fb3b6cb2a21e9f2da 2024-12-06T08:19:03,965 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c0212401db2044c1b29ec99531172de7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c0212401db2044c1b29ec99531172de7 2024-12-06T08:19:03,966 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c611b7ef7c354a7f9b873ac597bbbfde to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c611b7ef7c354a7f9b873ac597bbbfde 2024-12-06T08:19:03,968 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/90b500a6755f47509a1f158cdffe2726 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/90b500a6755f47509a1f158cdffe2726 2024-12-06T08:19:03,969 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d0feade4188a4537bec767349718f8c0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d0feade4188a4537bec767349718f8c0 2024-12-06T08:19:03,970 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0fe94aa1e84480b90d429e7ec3920a2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0fe94aa1e84480b90d429e7ec3920a2 2024-12-06T08:19:03,971 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fb911496ad844ae186b4667cea554b2b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fb911496ad844ae186b4667cea554b2b 2024-12-06T08:19:03,972 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/9d66f6f7d58242b0ae9112d6159cf88f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/9d66f6f7d58242b0ae9112d6159cf88f 2024-12-06T08:19:03,973 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e196f676be4543288077eaaa86c824ca to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e196f676be4543288077eaaa86c824ca 2024-12-06T08:19:03,975 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/7d19aa1198434ad5a2236dbbb511be18 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/7d19aa1198434ad5a2236dbbb511be18 2024-12-06T08:19:03,976 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/1389540a335447a886ee96fc6a1f596b to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/1389540a335447a886ee96fc6a1f596b 2024-12-06T08:19:03,977 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/5230bbf3c27f4827947728e42dd483d6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/5230bbf3c27f4827947728e42dd483d6 2024-12-06T08:19:03,978 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4f20f9d4e940436ebe0ff73f3b4912dd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4f20f9d4e940436ebe0ff73f3b4912dd 2024-12-06T08:19:03,979 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/bc13c50ca49c4e10816e8fff06fcd7e0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/bc13c50ca49c4e10816e8fff06fcd7e0 2024-12-06T08:19:03,980 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e5d2e54b0c274c83bf28c878ea42efb5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e5d2e54b0c274c83bf28c878ea42efb5 2024-12-06T08:19:03,981 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a6c8806c5e1a42d7bd6d551ebbf73da2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a6c8806c5e1a42d7bd6d551ebbf73da2 2024-12-06T08:19:03,982 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/016c080e592b47cebde06be786a2cc08 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/016c080e592b47cebde06be786a2cc08 2024-12-06T08:19:03,983 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/46808a14ac5c4c8bb4b7d68fc1fabf2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/46808a14ac5c4c8bb4b7d68fc1fabf2c 2024-12-06T08:19:03,984 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/abc52584142444f3834b42e6864ca9ee to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/abc52584142444f3834b42e6864ca9ee 2024-12-06T08:19:03,985 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/dee2c5be9d5d43d28e782d67dae1eea9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/dee2c5be9d5d43d28e782d67dae1eea9 2024-12-06T08:19:03,986 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0bc1044923a479e9d08dc15bc98936b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/e0bc1044923a479e9d08dc15bc98936b 2024-12-06T08:19:03,987 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f071cf2fef1340ee90d13a3524dbeb29 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f071cf2fef1340ee90d13a3524dbeb29 2024-12-06T08:19:03,988 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/63564774edc844dc91ccf335c812774a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/63564774edc844dc91ccf335c812774a 2024-12-06T08:19:03,989 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a84018974bda4530bc3951218f32ef58 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/a84018974bda4530bc3951218f32ef58 2024-12-06T08:19:03,990 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/347163e905054d3986f713b32b54f16a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/347163e905054d3986f713b32b54f16a 2024-12-06T08:19:03,991 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d401bd95689f4e689a834a1137134375 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/d401bd95689f4e689a834a1137134375 2024-12-06T08:19:03,992 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/78d34f6ac2124bfeab7fb5a995b51189 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/78d34f6ac2124bfeab7fb5a995b51189 2024-12-06T08:19:03,994 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fe40919e14984957b94de9b6bd42f666 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fe40919e14984957b94de9b6bd42f666 2024-12-06T08:19:03,995 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/10c1682554a94c2faeec0248d20b7ca3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/10c1682554a94c2faeec0248d20b7ca3 2024-12-06T08:19:03,996 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f8ab5b31451148c1840d8054b25d7de9 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/f8ab5b31451148c1840d8054b25d7de9 2024-12-06T08:19:03,997 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4a4b45945af54bd396ed75bbd4c68339 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/4a4b45945af54bd396ed75bbd4c68339 2024-12-06T08:19:03,998 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c4fc1af33da04770964a0831fc25fd49 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/c4fc1af33da04770964a0831fc25fd49 2024-12-06T08:19:03,999 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/8ba5e5ea38bd4ffb938727a3bc190516 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/8ba5e5ea38bd4ffb938727a3bc190516 2024-12-06T08:19:04,000 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fc4712a89566431abe5f7ad5657c4738 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/fc4712a89566431abe5f7ad5657c4738 2024-12-06T08:19:04,001 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/fe44fb8e0bf04591ba61f81fe6b69a1c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/e19ee0e0c78b4f57beb73ddb9f6e69a0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2cb748387ad64a6985f305cf387bff1a, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/07209012e10540bca294f81337200d73, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/68892efa49294e9d88e5fc634d4b330f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7f6224964bb34571935e006c4899ebe0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/67eb73d2620d4493ad8ffd8aafb26a8c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/91815d75eb4f4b30a59ee42eb1ad98cf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7bb18aeb609f457abe5cfb303fd33622, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6d8096de312147509593e48d453de761, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/3545d55a4bab433fab5ce4a875efecbd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/663f6e286dc04e3694b80f09277d848f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/44d2c83a7d5a45c291e3a516804953c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/c66786d7546e4924b8772f3977d8fd40, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/377410a8be82443cad4c80c73ff02fec, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/86419d7e4bfa4b2d91e3066720f8d0ea, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cd0457d713ba43ed87f75484ab24b2dc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/37c1d697f4784c8bb3bf366bbd95afc7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/72321a123bac433bac7f936c5d0045d6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/641eaeed1dad411488f656cc12b02b31, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6e5b7cd954c14aeaa8c271ee4b54b4e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/479e693598c246fc9dafcba99fdb64ce, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6214f3c3ff474addb59fab0907424bcd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/f9687c25ce0d43c480e138977e0c2643, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cf0b2f24026e497999dce1a7d0585ab3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/d2b4663095234eafbb4a5bbe797a3e9a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/70fb068973a341429e256e31b19412f7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/0c172fa6492f4117b8ab7923aaa5058d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/1da5131b2865451dbb0066114a5937aa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/385b73d27aae4311b41d361e6b43d338, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/051a5417a399412a8a139b4e344e2a9a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/25fe8aaa64fa420a812580a994de85ed, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/8177ac51982a45f1a4ac4dbdf8a194aa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/97dfdd273e9c42a1a68f0c032a3f3ec5] to archive 2024-12-06T08:19:04,002 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
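[Editor's note, hedged illustration] The archive records above show that an archived store file keeps its table/region/family/file layout and only has the root's "data" tree mirrored under "archive/data". A minimal sketch of deriving that target path (illustrative only; class and method names are invented, this is not the HFileArchiver implementation):

    public final class ArchivePathSketch {
        /** rootDir is assumed to be e.g. "hdfs://localhost:43731/user/jenkins/test-data/<run-id>". */
        static String toArchivePath(String rootDir, String storeFilePath) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("not under the data root: " + storeFilePath);
            }
            // Keep the relative "default/<table>/<region>/<family>/<file>" suffix unchanged.
            return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156";
            String src = root + "/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7";
            // Prints the same "archive/data/..." target the archiver logs for this file.
            System.out.println(toArchivePath(root, src));
        }
    }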
2024-12-06T08:19:04,004 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7a6480539522489faf7d6b481b3fefb7 2024-12-06T08:19:04,006 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/fe44fb8e0bf04591ba61f81fe6b69a1c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/fe44fb8e0bf04591ba61f81fe6b69a1c 2024-12-06T08:19:04,007 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/e19ee0e0c78b4f57beb73ddb9f6e69a0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/e19ee0e0c78b4f57beb73ddb9f6e69a0 2024-12-06T08:19:04,008 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b3d0cca8dc8c4cafbc3d22e34bbb4ce3 2024-12-06T08:19:04,009 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2cb748387ad64a6985f305cf387bff1a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2cb748387ad64a6985f305cf387bff1a 2024-12-06T08:19:04,011 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/07209012e10540bca294f81337200d73 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/07209012e10540bca294f81337200d73 2024-12-06T08:19:04,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/68892efa49294e9d88e5fc634d4b330f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/68892efa49294e9d88e5fc634d4b330f 2024-12-06T08:19:04,013 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7f6224964bb34571935e006c4899ebe0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7f6224964bb34571935e006c4899ebe0 2024-12-06T08:19:04,014 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/67eb73d2620d4493ad8ffd8aafb26a8c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/67eb73d2620d4493ad8ffd8aafb26a8c 2024-12-06T08:19:04,016 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/91815d75eb4f4b30a59ee42eb1ad98cf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/91815d75eb4f4b30a59ee42eb1ad98cf 2024-12-06T08:19:04,017 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7bb18aeb609f457abe5cfb303fd33622 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/7bb18aeb609f457abe5cfb303fd33622 2024-12-06T08:19:04,018 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6d8096de312147509593e48d453de761 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6d8096de312147509593e48d453de761 2024-12-06T08:19:04,019 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/3545d55a4bab433fab5ce4a875efecbd to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/3545d55a4bab433fab5ce4a875efecbd 2024-12-06T08:19:04,021 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/663f6e286dc04e3694b80f09277d848f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/663f6e286dc04e3694b80f09277d848f 2024-12-06T08:19:04,022 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/44d2c83a7d5a45c291e3a516804953c2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/44d2c83a7d5a45c291e3a516804953c2 2024-12-06T08:19:04,023 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/c66786d7546e4924b8772f3977d8fd40 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/c66786d7546e4924b8772f3977d8fd40 2024-12-06T08:19:04,024 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/377410a8be82443cad4c80c73ff02fec to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/377410a8be82443cad4c80c73ff02fec 2024-12-06T08:19:04,025 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/86419d7e4bfa4b2d91e3066720f8d0ea to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/86419d7e4bfa4b2d91e3066720f8d0ea 2024-12-06T08:19:04,026 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cd0457d713ba43ed87f75484ab24b2dc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cd0457d713ba43ed87f75484ab24b2dc 2024-12-06T08:19:04,027 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/37c1d697f4784c8bb3bf366bbd95afc7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/37c1d697f4784c8bb3bf366bbd95afc7 2024-12-06T08:19:04,029 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/72321a123bac433bac7f936c5d0045d6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/72321a123bac433bac7f936c5d0045d6 2024-12-06T08:19:04,030 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/641eaeed1dad411488f656cc12b02b31 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/641eaeed1dad411488f656cc12b02b31 2024-12-06T08:19:04,031 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6e5b7cd954c14aeaa8c271ee4b54b4e0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6e5b7cd954c14aeaa8c271ee4b54b4e0 2024-12-06T08:19:04,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/479e693598c246fc9dafcba99fdb64ce to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/479e693598c246fc9dafcba99fdb64ce 2024-12-06T08:19:04,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6214f3c3ff474addb59fab0907424bcd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/6214f3c3ff474addb59fab0907424bcd 2024-12-06T08:19:04,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/f9687c25ce0d43c480e138977e0c2643 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/f9687c25ce0d43c480e138977e0c2643 2024-12-06T08:19:04,036 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cf0b2f24026e497999dce1a7d0585ab3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/cf0b2f24026e497999dce1a7d0585ab3 2024-12-06T08:19:04,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/d2b4663095234eafbb4a5bbe797a3e9a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/d2b4663095234eafbb4a5bbe797a3e9a 2024-12-06T08:19:04,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/70fb068973a341429e256e31b19412f7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/70fb068973a341429e256e31b19412f7 2024-12-06T08:19:04,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/0c172fa6492f4117b8ab7923aaa5058d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/0c172fa6492f4117b8ab7923aaa5058d 2024-12-06T08:19:04,041 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/1da5131b2865451dbb0066114a5937aa to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/1da5131b2865451dbb0066114a5937aa 2024-12-06T08:19:04,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/385b73d27aae4311b41d361e6b43d338 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/385b73d27aae4311b41d361e6b43d338 2024-12-06T08:19:04,043 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/051a5417a399412a8a139b4e344e2a9a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/051a5417a399412a8a139b4e344e2a9a 2024-12-06T08:19:04,045 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/25fe8aaa64fa420a812580a994de85ed to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/25fe8aaa64fa420a812580a994de85ed 2024-12-06T08:19:04,046 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/8177ac51982a45f1a4ac4dbdf8a194aa to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/8177ac51982a45f1a4ac4dbdf8a194aa 2024-12-06T08:19:04,047 DEBUG [StoreCloser-TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/97dfdd273e9c42a1a68f0c032a3f3ec5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/97dfdd273e9c42a1a68f0c032a3f3ec5 2024-12-06T08:19:04,052 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/recovered.edits/539.seqid, newMaxSeqId=539, maxSeqId=1 2024-12-06T08:19:04,055 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad. 
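[Editor's note, hedged illustration] The close sequence above ends with WALSplitUtil writing "recovered.edits/539.seqid" (newMaxSeqId=539) before the region is reported closed; the marker name records the highest sequence id already persisted so a later open can skip replaying older edits. A small sketch of that file-name convention as it appears in the log (names are illustrative, not the WALSplitUtil API):

    public final class SeqIdMarkerSketch {
        static String markerPath(String regionDir, long newMaxSeqId) {
            // "<newMaxSeqId>.seqid" under the region's recovered.edits directory.
            return regionDir + "/recovered.edits/" + newMaxSeqId + ".seqid";
        }

        public static void main(String[] args) {
            String regionDir = "hdfs://localhost:43731/user/jenkins/test-data/"
                + "c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/"
                + "27b39638a4e82980fa51c6694c44d0ad";
            // Reproduces the "539.seqid" path reported above for newMaxSeqId=539.
            System.out.println(markerPath(regionDir, 539L));
        }
    }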
2024-12-06T08:19:04,055 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 27b39638a4e82980fa51c6694c44d0ad: 2024-12-06T08:19:04,057 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:19:04,057 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=27b39638a4e82980fa51c6694c44d0ad, regionState=CLOSED 2024-12-06T08:19:04,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-06T08:19:04,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 27b39638a4e82980fa51c6694c44d0ad, server=b6b797fc3981,38041,1733473111442 in 1.6290 sec 2024-12-06T08:19:04,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-06T08:19:04,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=27b39638a4e82980fa51c6694c44d0ad, UNASSIGN in 1.6350 sec 2024-12-06T08:19:04,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-06T08:19:04,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6420 sec 2024-12-06T08:19:04,066 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473144066"}]},"ts":"1733473144066"} 2024-12-06T08:19:04,067 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T08:19:04,069 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T08:19:04,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6600 sec 2024-12-06T08:19:04,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-06T08:19:04,521 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-06T08:19:04,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T08:19:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,529 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-06T08:19:04,531 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,534 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:19:04,538 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/recovered.edits] 2024-12-06T08:19:04,540 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/04aaea443b7f4a0c8e8f4f7cb904a5be to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/04aaea443b7f4a0c8e8f4f7cb904a5be 2024-12-06T08:19:04,541 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7db5f1fa9dd740ddbc8748746af174a9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/7db5f1fa9dd740ddbc8748746af174a9 2024-12-06T08:19:04,543 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c8cf594e2dc7476b818364c404db2df4 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/A/c8cf594e2dc7476b818364c404db2df4 2024-12-06T08:19:04,545 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/815faef7e5ed486284bbb5420d02fd1b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/815faef7e5ed486284bbb5420d02fd1b 2024-12-06T08:19:04,546 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/927d4cf44a574897ae8ccf9e6d871a65 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/927d4cf44a574897ae8ccf9e6d871a65 2024-12-06T08:19:04,547 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/ef9e3a9517c242ceb5bf5273a489d133 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/B/ef9e3a9517c242ceb5bf5273a489d133 2024-12-06T08:19:04,550 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/04cba9e80b494c2c8a3d02e82f02f2ee to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/04cba9e80b494c2c8a3d02e82f02f2ee 2024-12-06T08:19:04,551 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2733f8184c3247c3a8f38bb4c1a2cde5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/2733f8184c3247c3a8f38bb4c1a2cde5 2024-12-06T08:19:04,552 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b4daa98c7bf14eaaad019b131a20fe52 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/C/b4daa98c7bf14eaaad019b131a20fe52 2024-12-06T08:19:04,555 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/recovered.edits/539.seqid to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad/recovered.edits/539.seqid 2024-12-06T08:19:04,555 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/27b39638a4e82980fa51c6694c44d0ad 2024-12-06T08:19:04,556 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T08:19:04,561 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-06T08:19:04,569 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T08:19:04,602 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
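[Editor's note, hedged illustration] The DELETE_TABLE_CLEAR_FS_LAYOUT step logged above archives each column-family directory (A, B, C) and recovered.edits file by file into the mirrored archive tree, then deletes the emptied region directory before the procedure moves on to removing the table from hbase:meta. A rough sketch of that order of operations (the Fs interface and all names here are stand-ins, not HBase's DeleteTableProcedure or HFileArchiver code):

    import java.util.List;

    public final class ClearFsLayoutSketch {
        interface Fs {                       // tiny assumed filesystem API for the sketch
            List<String> list(String dir);
            void rename(String src, String dst);
            void delete(String dir);
        }

        static void archiveRegion(Fs fs, String rootDir, String regionDir) {
            for (String child : fs.list(regionDir)) {          // e.g. A, B, C, recovered.edits
                for (String file : fs.list(child)) {
                    String rel = file.substring((rootDir + "/data/").length());
                    fs.rename(file, rootDir + "/archive/data/" + rel);   // move, keeping the layout
                }
            }
            fs.delete(regionDir);            // corresponds to "HFileArchiver(610): Deleted ..." above
        }
    }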
2024-12-06T08:19:04,604 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,604 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T08:19:04,604 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733473144604"}]},"ts":"9223372036854775807"} 2024-12-06T08:19:04,608 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T08:19:04,608 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 27b39638a4e82980fa51c6694c44d0ad, NAME => 'TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T08:19:04,608 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-06T08:19:04,608 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733473144608"}]},"ts":"9223372036854775807"} 2024-12-06T08:19:04,611 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T08:19:04,614 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,615 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-12-06T08:19:04,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-06T08:19:04,632 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-06T08:19:04,643 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;b6b797fc3981:38041-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
hconnection-0x5602a74-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-705260017_22 at /127.0.0.1:35602 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=451 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=347 (was 152) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8235 (was 8104) - AvailableMemoryMB LEAK? - 2024-12-06T08:19:04,654 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=347, ProcessCount=11, AvailableMemoryMB=8234 2024-12-06T08:19:04,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
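The TableDescriptorChecker warning above fires because the flush threshold in play is only 131072 bytes (128 KB, versus the usual 128 MB default), which forces very frequent memstore flushes during the atomicity run. A minimal sketch of lowering that threshold in a test configuration follows; the class name is illustrative and not taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushSizeSetup {
      public static Configuration smallFlushConf() {
        Configuration conf = HBaseConfiguration.create();
        // 128 KB flush threshold: small enough to force very frequent flushes,
        // which is exactly what TableDescriptorChecker warns about above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        return conf;
      }
    }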
2024-12-06T08:19:04,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:19:04,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:04,658 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:19:04,658 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:04,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-12-06T08:19:04,659 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:19:04,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-06T08:19:04,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741956_1132 (size=963) 2024-12-06T08:19:04,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-06T08:19:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-06T08:19:05,069 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:19:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741957_1133 (size=53) 2024-12-06T08:19:05,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-06T08:19:05,476 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:19:05,476 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 03df86e7064722e5116b657f067426bf, disabling compactions & flushes 2024-12-06T08:19:05,476 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:05,476 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:05,476 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. after waiting 0 ms 2024-12-06T08:19:05,477 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:05,477 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
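The create request logged above declares three column families (A, B, C), each keeping a single version with a 64 KB block size, and tags the table with 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A roughly equivalent request can be built with the client Admin API; the sketch below is illustrative only and assumes a reachable cluster configuration on the classpath.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // table-level metadata seen in the log: ADAPTIVE in-memory compaction
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)        // VERSIONS => '1'
                    .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }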
2024-12-06T08:19:05,477 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:05,478 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:19:05,478 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733473145478"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473145478"}]},"ts":"1733473145478"} 2024-12-06T08:19:05,480 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:19:05,481 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:19:05,481 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473145481"}]},"ts":"1733473145481"} 2024-12-06T08:19:05,482 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T08:19:05,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, ASSIGN}] 2024-12-06T08:19:05,488 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, ASSIGN 2024-12-06T08:19:05,488 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:19:05,511 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:19:05,513 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:19:05,639 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:05,641 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:19:05,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-06T08:19:05,792 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
b6b797fc3981,38041,1733473111442 2024-12-06T08:19:05,796 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:05,796 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:19:05,797 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,797 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:19:05,797 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,797 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,799 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,800 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:05,801 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03df86e7064722e5116b657f067426bf columnFamilyName A 2024-12-06T08:19:05,801 DEBUG [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:05,801 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(327): Store=03df86e7064722e5116b657f067426bf/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
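The CompactingMemStore lines above show each store coming up with an ADAPTIVE in-memory compactor and a 2.00 MB in-memory flush threshold. Besides the table-level metadata key this test uses, the same policy can also be requested per column family; a small sketch of that alternative, assuming the current client API:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamily {
      // Family "A" with ADAPTIVE in-memory compaction, matching the policy the
      // store adopts in the log via the table-level ADAPTIVE setting.
      static ColumnFamilyDescriptor adaptiveA() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }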
2024-12-06T08:19:05,801 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,802 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:05,803 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03df86e7064722e5116b657f067426bf columnFamilyName B 2024-12-06T08:19:05,803 DEBUG [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:05,804 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(327): Store=03df86e7064722e5116b657f067426bf/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:05,804 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,805 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:05,805 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03df86e7064722e5116b657f067426bf columnFamilyName C 2024-12-06T08:19:05,805 DEBUG [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:05,806 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(327): Store=03df86e7064722e5116b657f067426bf/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:05,806 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:05,807 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,807 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,809 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:19:05,810 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:05,812 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:19:05,813 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened 03df86e7064722e5116b657f067426bf; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59691763, jitterRate=-0.11052341759204865}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:19:05,814 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:05,814 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., pid=37, masterSystemTime=1733473145792 2024-12-06T08:19:05,816 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:05,816 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
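The repeated "Checking to see if procedure is done pid=35" probes above are the client polling the master while it blocks on the create-table future; the completion shows up shortly afterwards as "Operation: CREATE ... procId: 35 completed". A hedged sketch of the asynchronous form of the same call, with an illustrative timeout not taken from the log:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class AsyncCreate {
      // Submit the create and wait for the master-side procedure to finish;
      // the synchronous admin.createTable(...) performs the same polling internally.
      static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
        Future<Void> done = admin.createTableAsync(desc);
        done.get(60, TimeUnit.SECONDS);
      }
    }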
2024-12-06T08:19:05,817 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:05,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-06T08:19:05,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 in 177 msec 2024-12-06T08:19:05,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-06T08:19:05,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, ASSIGN in 335 msec 2024-12-06T08:19:05,826 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:19:05,826 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473145826"}]},"ts":"1733473145826"} 2024-12-06T08:19:05,827 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T08:19:05,830 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:19:05,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1740 sec 2024-12-06T08:19:06,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-06T08:19:06,764 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-12-06T08:19:06,766 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5caaf139 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e560c7b 2024-12-06T08:19:06,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ddf4c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:06,772 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:06,776 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:06,778 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:19:06,780 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55228, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:19:06,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T08:19:06,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:19:06,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:06,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741958_1134 (size=999) 2024-12-06T08:19:07,207 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-06T08:19:07,207 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-06T08:19:07,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:19:07,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, REOPEN/MOVE}] 2024-12-06T08:19:07,220 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, REOPEN/MOVE 2024-12-06T08:19:07,220 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,222 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:19:07,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:19:07,373 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,374 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,374 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:19:07,374 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 03df86e7064722e5116b657f067426bf, disabling compactions & flushes 2024-12-06T08:19:07,374 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,374 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,375 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. after waiting 0 ms 2024-12-06T08:19:07,375 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
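The modify-table request logged above switches column family A to MOB storage (IS_MOB => 'true') with a threshold of only 4 bytes, so most cells written to A will be stored as mobs, and the master reopens the region to apply the new descriptor (the close/reopen sequence around this point). A rough per-family sketch of the same change, assuming an existing Admin handle; the log itself issues it as a whole-table modify:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMob {
      static void mobifyFamilyA(Admin admin) throws IOException {
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMaxVersions(1)
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(4L)   // MOB_THRESHOLD => '4' bytes
            .build();
        // Triggers a ModifyTableProcedure and a region reopen, as seen in the log.
        admin.modifyColumnFamily(TableName.valueOf("TestAcidGuarantees"), mobA);
      }
    }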
2024-12-06T08:19:07,379 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-06T08:19:07,379 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,379 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:07,379 WARN [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: 03df86e7064722e5116b657f067426bf to self. 2024-12-06T08:19:07,381 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,381 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=CLOSED 2024-12-06T08:19:07,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-06T08:19:07,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 in 160 msec 2024-12-06T08:19:07,385 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, REOPEN/MOVE; state=CLOSED, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=true 2024-12-06T08:19:07,535 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:19:07,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,691 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:07,692 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:19:07,692 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,692 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:19:07,692 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,692 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,697 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,698 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:07,703 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03df86e7064722e5116b657f067426bf columnFamilyName A 2024-12-06T08:19:07,705 DEBUG [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:07,706 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(327): Store=03df86e7064722e5116b657f067426bf/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:07,706 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,707 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:07,707 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03df86e7064722e5116b657f067426bf columnFamilyName B 2024-12-06T08:19:07,707 DEBUG [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:07,708 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(327): Store=03df86e7064722e5116b657f067426bf/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:07,708 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,708 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:07,709 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03df86e7064722e5116b657f067426bf columnFamilyName C 2024-12-06T08:19:07,709 DEBUG [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:07,709 INFO [StoreOpener-03df86e7064722e5116b657f067426bf-1 {}] regionserver.HStore(327): Store=03df86e7064722e5116b657f067426bf/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:07,709 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,710 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,711 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,713 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:19:07,715 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,715 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened 03df86e7064722e5116b657f067426bf; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60714224, jitterRate=-0.09528756141662598}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:19:07,717 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:07,718 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., pid=42, masterSystemTime=1733473147688 2024-12-06T08:19:07,719 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,720 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
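The entries above show region 03df86e7064722e5116b657f067426bf opening with three column families (A, B, C), each backed by a CompactingMemStore using the ADAPTIVE in-memory compaction policy, with family A later flushing through the MOB path (HMobStore/mobdir). Below is a minimal, hypothetical sketch of how a table with that shape could be declared and flushed through the stock HBase 2.x client API; the table name, ZooKeeper port, and family names are taken from the log, while the class name, the MOB threshold, and the create-then-flush flow are illustrative assumptions, not the actual TestAcidGuarantees setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidTableSketch {                  // hypothetical class name, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and port taken from the ReadOnlyZKClient "Connect ... to 127.0.0.1:65195" lines below.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 65195);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestAcidGuarantees");
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name);
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // CompactingMemStore with the ADAPTIVE policy, as reported at region open.
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                // MOB storage, matching the HMobStore/mobdir flush path seen later;
                // the 100-byte threshold is an assumption for illustration only.
                .setMobEnabled(true)
                .setMobThreshold(100L)
                .build());
      }
      admin.createTable(table.build());

      // Client-requested flush; the master runs it as a FlushTableProcedure
      // (the pid=43 procedure that appears a few entries below).
      admin.flush(name);
    }
  }
}

The admin.flush(...) call corresponds to the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request that the master turns into FlushTableProcedure pid=43 further down. The repeated RegionTooBusyException warnings that follow come from HRegion.checkResources rejecting puts once the region's memstore passes its blocking limit (reported here as 512.0 K, presumably because the test shrinks hbase.hregion.memstore.flush.size and relies on hbase.hregion.memstore.block.multiplier); the puts are retried until the in-progress flush drains the memstore and checkResources stops throwing.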
2024-12-06T08:19:07,720 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=OPEN, openSeqNum=5, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-12-06T08:19:07,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 in 184 msec 2024-12-06T08:19:07,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-06T08:19:07,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, REOPEN/MOVE in 504 msec 2024-12-06T08:19:07,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-06T08:19:07,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 517 msec 2024-12-06T08:19:07,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 941 msec 2024-12-06T08:19:07,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-06T08:19:07,740 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bcbdbdb to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c826820 2024-12-06T08:19:07,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7362d978, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,750 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79982672 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2931c73e 2024-12-06T08:19:07,755 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bad2e85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,756 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b55744e to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@176c5c1b 2024-12-06T08:19:07,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328f994d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,762 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x454f1431 to 
127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@190853fc 2024-12-06T08:19:07,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19a533a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,767 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x505d5ccd to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46114993 2024-12-06T08:19:07,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,771 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-12-06T08:19:07,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,782 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-12-06T08:19:07,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,787 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-12-06T08:19:07,790 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,791 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-12-06T08:19:07,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:07,800 DEBUG 
[hconnection-0x3dfb4f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,804 DEBUG [hconnection-0x3a83afe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,806 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49868, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,815 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,820 DEBUG [hconnection-0x2696aa23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,822 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,825 DEBUG [hconnection-0x42b4d2a0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:07,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-06T08:19:07,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-06T08:19:07,828 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:07,829 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,829 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:07,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:07,832 DEBUG [hconnection-0x12e2ac45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,838 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:07,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 
03df86e7064722e5116b657f067426bf 2024-12-06T08:19:07,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:07,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:07,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:07,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:07,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:07,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:07,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473207887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,892 DEBUG [hconnection-0x452cbbe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,894 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:07,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473207891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:07,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473207892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,912 DEBUG [hconnection-0x1b4dae9e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,913 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:07,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473207917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-06T08:19:07,932 DEBUG [hconnection-0x6fe45cb1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,936 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:07,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473207941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,948 DEBUG [hconnection-0x32a248f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:07,950 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:07,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206295709b17e774cb5af9423cfa53b9828_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473147834/Put/seqid=0 2024-12-06T08:19:07,982 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:07,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:07,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:07,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:07,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:07,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473207998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473207999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473208019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473207999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741959_1135 (size=12154) 2024-12-06T08:19:08,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473208043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,056 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:08,078 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206295709b17e774cb5af9423cfa53b9828_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206295709b17e774cb5af9423cfa53b9828_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:08,081 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/95acaa833da74d448abe1073c0700a52, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:08,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/95acaa833da74d448abe1073c0700a52 is 175, key is test_row_0/A:col10/1733473147834/Put/seqid=0 2024-12-06T08:19:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-06T08:19:08,136 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:08,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
as already flushing 2024-12-06T08:19:08,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741960_1136 (size=30955) 2024-12-06T08:19:08,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473208223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473208234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473208234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473208235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473208251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,292 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:08,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:08,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-06T08:19:08,446 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:08,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473208530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473208538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473208539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473208539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,546 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/95acaa833da74d448abe1073c0700a52 2024-12-06T08:19:08,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:08,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473208556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f8b0fe9be04649339527389601e646c6 is 50, key is test_row_0/B:col10/1733473147834/Put/seqid=0 2024-12-06T08:19:08,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741961_1137 (size=12001) 2024-12-06T08:19:08,756 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:08,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:08,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,912 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:08,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:08,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:08,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:08,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:08,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-06T08:19:09,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f8b0fe9be04649339527389601e646c6 2024-12-06T08:19:09,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:09,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473209041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:09,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473209044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:09,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473209044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:09,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473209045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473209061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:09,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:09,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:19:09,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/a817f6bdc25b4addb692dfee14db392e is 50, key is test_row_0/C:col10/1733473147834/Put/seqid=0
2024-12-06T08:19:09,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741962_1138 (size=12001)
2024-12-06T08:19:09,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/a817f6bdc25b4addb692dfee14db392e
2024-12-06T08:19:09,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/95acaa833da74d448abe1073c0700a52 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52
2024-12-06T08:19:09,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52, entries=150, sequenceid=16, filesize=30.2 K
2024-12-06T08:19:09,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f8b0fe9be04649339527389601e646c6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f8b0fe9be04649339527389601e646c6
2024-12-06T08:19:09,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f8b0fe9be04649339527389601e646c6, entries=150, sequenceid=16, filesize=11.7 K
2024-12-06T08:19:09,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/a817f6bdc25b4addb692dfee14db392e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a817f6bdc25b4addb692dfee14db392e
2024-12-06T08:19:09,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a817f6bdc25b4addb692dfee14db392e, entries=150, sequenceid=16, filesize=11.7 K
2024-12-06T08:19:09,193
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 03df86e7064722e5116b657f067426bf in 1352ms, sequenceid=16, compaction requested=false 2024-12-06T08:19:09,194 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-06T08:19:09,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:09,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:09,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-06T08:19:09,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:09,221 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:19:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:09,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d6df13e497284926b06c140ee9543cbf_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473147887/Put/seqid=0 2024-12-06T08:19:09,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741963_1139 (size=12154) 2024-12-06T08:19:09,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,299 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d6df13e497284926b06c140ee9543cbf_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d6df13e497284926b06c140ee9543cbf_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:09,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ce22768a1baf456d914ca2ad2c9fe42e, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:09,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ce22768a1baf456d914ca2ad2c9fe42e is 175, key is test_row_0/A:col10/1733473147887/Put/seqid=0 2024-12-06T08:19:09,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741964_1140 (size=30955) 2024-12-06T08:19:09,311 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ce22768a1baf456d914ca2ad2c9fe42e 2024-12-06T08:19:09,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/cfa011746b41459ba97363a7a9576389 is 50, key is test_row_0/B:col10/1733473147887/Put/seqid=0 2024-12-06T08:19:09,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741965_1141 (size=12001) 2024-12-06T08:19:09,368 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/cfa011746b41459ba97363a7a9576389 2024-12-06T08:19:09,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/34d8cb2ba93c469eb80d215c73b92f9e is 50, key is test_row_0/C:col10/1733473147887/Put/seqid=0 2024-12-06T08:19:09,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741966_1142 (size=12001) 2024-12-06T08:19:09,464 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/34d8cb2ba93c469eb80d215c73b92f9e 2024-12-06T08:19:09,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ce22768a1baf456d914ca2ad2c9fe42e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e 2024-12-06T08:19:09,479 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e, entries=150, sequenceid=41, filesize=30.2 K 2024-12-06T08:19:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/cfa011746b41459ba97363a7a9576389 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/cfa011746b41459ba97363a7a9576389 2024-12-06T08:19:09,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,494 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/cfa011746b41459ba97363a7a9576389, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T08:19:09,495 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/34d8cb2ba93c469eb80d215c73b92f9e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/34d8cb2ba93c469eb80d215c73b92f9e 2024-12-06T08:19:09,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:09,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,530 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/34d8cb2ba93c469eb80d215c73b92f9e, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T08:19:09,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,531 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 03df86e7064722e5116b657f067426bf in 310ms, sequenceid=41, compaction requested=false 2024-12-06T08:19:09,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:09,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:09,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-06T08:19:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-06T08:19:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-06T08:19:09,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7040 sec 2024-12-06T08:19:09,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.7100 sec 2024-12-06T08:19:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,617 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T08:19:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
[... the same DEBUG record, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, repeats continuously on RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=38041) from 2024-12-06T08:19:09,740 through 2024-12-06T08:19:09,893 ...]
2024-12-06T08:19:09,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-12-06T08:19:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-06T08:19:09,933 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed
2024-12-06T08:19:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,935 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-06T08:19:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees
2024-12-06T08:19:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,937 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-06T08:19:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-06T08:19:09,938 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-06T08:19:09,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-06T08:19:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:09,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:09,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-06T08:19:10,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,089 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-06T08:19:10,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:10,090 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-06T08:19:10,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:10,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:10,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
as already flushing 2024-12-06T08:19:10,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412061c68343e0a7c483c803402ac131f6bbc_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473150087/Put/seqid=0 2024-12-06T08:19:10,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,116 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,123 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,136 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741967_1143 (size=12154) 2024-12-06T08:19:10,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,148 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412061c68343e0a7c483c803402ac131f6bbc_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061c68343e0a7c483c803402ac131f6bbc_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:10,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/20978e56318542308850ac5500d5168e, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:10,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/20978e56318542308850ac5500d5168e is 175, key is test_row_0/A:col10/1733473150087/Put/seqid=0 2024-12-06T08:19:10,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473210157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473210160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473210163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473210164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473210165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,178 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,186 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741968_1144 (size=30955) 2024-12-06T08:19:10,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,188 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=47, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/20978e56318542308850ac5500d5168e 2024-12-06T08:19:10,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/487f2123ea9b41be900d08844aabbc9d is 50, key is test_row_0/B:col10/1733473150087/Put/seqid=0 2024-12-06T08:19:10,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:10,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-06T08:19:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741969_1145 (size=12001) 2024-12-06T08:19:10,271 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/487f2123ea9b41be900d08844aabbc9d 2024-12-06T08:19:10,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473210277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473210274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473210277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473210277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473210292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/7cc083f858554b8ca22df4d3d54b4064 is 50, key is test_row_0/C:col10/1733473150087/Put/seqid=0 2024-12-06T08:19:10,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741970_1146 (size=12001) 2024-12-06T08:19:10,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473210482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473210482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473210483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473210483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473210497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-06T08:19:10,763 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/7cc083f858554b8ca22df4d3d54b4064 2024-12-06T08:19:10,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/20978e56318542308850ac5500d5168e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e 2024-12-06T08:19:10,777 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e, entries=150, sequenceid=47, filesize=30.2 K 2024-12-06T08:19:10,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/487f2123ea9b41be900d08844aabbc9d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/487f2123ea9b41be900d08844aabbc9d 2024-12-06T08:19:10,786 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/487f2123ea9b41be900d08844aabbc9d, entries=150, sequenceid=47, filesize=11.7 K 2024-12-06T08:19:10,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/7cc083f858554b8ca22df4d3d54b4064 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/7cc083f858554b8ca22df4d3d54b4064 2024-12-06T08:19:10,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473210792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473210792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473210792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,803 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/7cc083f858554b8ca22df4d3d54b4064, entries=150, sequenceid=47, filesize=11.7 K 2024-12-06T08:19:10,805 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 03df86e7064722e5116b657f067426bf in 715ms, sequenceid=47, compaction requested=true 2024-12-06T08:19:10,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:10,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
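The repeated org.apache.hadoop.hbase.RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K in this test run; in a default deployment that limit is typically hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the test evidently uses a deliberately small value to force blocking). The sketch below is a minimal, hypothetical writer against the TestAcidGuarantees table showing where that exception can surface on the client side; the class name, backoff values, and cell contents are illustrative only, and the stock HBase client will normally retry a RegionTooBusyException on its own before giving up, so an explicit catch like this is purely for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter { // hypothetical example class, not part of the test
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/family/qualifier mirror the keys seen in the log ("test_row_0/A:col10").
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100; // illustrative backoff schedule
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    // May be rejected while the memstore is over its blocking limit
                    // ("Over memstore limit=512.0 K" in the log); the exception may also
                    // arrive wrapped by the client's own retry machinery.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // Give the in-flight flush time to drain the memstore, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```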
2024-12-06T08:19:10,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-06T08:19:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-06T08:19:10,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-06T08:19:10,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 869 msec 2024-12-06T08:19:10,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 874 msec 2024-12-06T08:19:10,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-12-06T08:19:10,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:10,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:10,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:10,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:10,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:10,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:10,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473210815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473210817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120671df75cf39014a869d6a2bc803795827_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:10,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741971_1147 (size=12154) 2024-12-06T08:19:10,881 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:10,889 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120671df75cf39014a869d6a2bc803795827_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120671df75cf39014a869d6a2bc803795827_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:10,892 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c790489617284ac797f75f042f9b6404, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:10,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c790489617284ac797f75f042f9b6404 is 175, key is test_row_0/A:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:10,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741972_1148 (size=30955) 2024-12-06T08:19:10,923 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=64.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c790489617284ac797f75f042f9b6404 2024-12-06T08:19:10,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473210921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:10,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473210922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:10,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/53ed1c8a95a64780b3b3a8dec6c848a2 is 50, key is test_row_0/B:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:10,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741973_1149 (size=12001) 2024-12-06T08:19:10,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/53ed1c8a95a64780b3b3a8dec6c848a2 2024-12-06T08:19:10,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/e2e65591150f47c6a8f2cce307b112f3 is 50, key is test_row_0/C:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741974_1150 (size=12001) 2024-12-06T08:19:11,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/e2e65591150f47c6a8f2cce307b112f3 2024-12-06T08:19:11,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-06T08:19:11,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c790489617284ac797f75f042f9b6404 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404 2024-12-06T08:19:11,044 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 
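The flush that completes just above (procId 45) and the one that starts immediately below (pid=47) are client-requested table flushes ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), which the master runs as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region. A minimal sketch of issuing such a request through the Admin API follows; the class name is hypothetical, and judging by the HBaseAdmin$TableFuture line above, the call returns only once the procedure has completed. The HBase shell equivalent is flush 'TestAcidGuarantees'.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable { // hypothetical example class, not part of the test
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; in the log this shows up
            // as a FlushTableProcedure (e.g. pid=45, pid=47) with FlushRegionProcedure
            // subprocedures, while the client polls "Checking to see if procedure is done".
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```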
2024-12-06T08:19:11,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:11,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-06T08:19:11,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T08:19:11,050 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:11,051 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:11,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:11,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404, entries=150, sequenceid=79, filesize=30.2 K 2024-12-06T08:19:11,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/53ed1c8a95a64780b3b3a8dec6c848a2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/53ed1c8a95a64780b3b3a8dec6c848a2 2024-12-06T08:19:11,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/53ed1c8a95a64780b3b3a8dec6c848a2, entries=150, sequenceid=79, filesize=11.7 K 2024-12-06T08:19:11,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/e2e65591150f47c6a8f2cce307b112f3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/e2e65591150f47c6a8f2cce307b112f3 2024-12-06T08:19:11,063 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/e2e65591150f47c6a8f2cce307b112f3, entries=150, sequenceid=79, filesize=11.7 K 2024-12-06T08:19:11,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for 03df86e7064722e5116b657f067426bf 
in 258ms, sequenceid=79, compaction requested=true 2024-12-06T08:19:11,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:11,070 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:11,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:11,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:11,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:11,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:11,072 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:11,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:11,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:11,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,073 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123820 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:11,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,073 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:11,073 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,073 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=120.9 K 2024-12-06T08:19:11,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,073 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,074 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404] 2024-12-06T08:19:11,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,074 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95acaa833da74d448abe1073c0700a52, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473147830 2024-12-06T08:19:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,075 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce22768a1baf456d914ca2ad2c9fe42e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473147883 2024-12-06T08:19:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,076 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:11,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,080 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:11,080 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,080 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f8b0fe9be04649339527389601e646c6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/cfa011746b41459ba97363a7a9576389, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/487f2123ea9b41be900d08844aabbc9d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/53ed1c8a95a64780b3b3a8dec6c848a2] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=46.9 K 2024-12-06T08:19:11,080 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20978e56318542308850ac5500d5168e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733473150084 2024-12-06T08:19:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,081 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f8b0fe9be04649339527389601e646c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473147830 2024-12-06T08:19:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,081 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting cfa011746b41459ba97363a7a9576389, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473147883 2024-12-06T08:19:11,081 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c790489617284ac797f75f042f9b6404, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473150129 2024-12-06T08:19:11,082 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 487f2123ea9b41be900d08844aabbc9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733473150084 2024-12-06T08:19:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,083 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 53ed1c8a95a64780b3b3a8dec6c848a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473150129 2024-12-06T08:19:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,117 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,124 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#B#compaction#130 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,125 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f12e31e0e49b4d018c3134044ad99e17 is 50, key is test_row_0/B:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,131 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412069c0c1c7cd8c3437fb8c12e676e7277fc_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,135 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412069c0c1c7cd8c3437fb8c12e676e7277fc_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:11,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,136 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069c0c1c7cd8c3437fb8c12e676e7277fc_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:11,147 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:11,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:11,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:11,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:11,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:11,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T08:19:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741975_1151 (size=12139) 2024-12-06T08:19:11,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741976_1152 (size=4469) 2024-12-06T08:19:11,173 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#A#compaction#129 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:11,176 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6aa73babdca4a5e99a69e8193c9762e is 175, key is test_row_0/A:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:11,182 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f12e31e0e49b4d018c3134044ad99e17 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f12e31e0e49b4d018c3134044ad99e17 2024-12-06T08:19:11,188 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into f12e31e0e49b4d018c3134044ad99e17(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:11,189 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:11,189 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=12, startTime=1733473151072; duration=0sec 2024-12-06T08:19:11,189 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:11,189 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:11,189 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:11,190 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:11,191 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:11,191 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,191 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a817f6bdc25b4addb692dfee14db392e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/34d8cb2ba93c469eb80d215c73b92f9e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/7cc083f858554b8ca22df4d3d54b4064, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/e2e65591150f47c6a8f2cce307b112f3] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=46.9 K 2024-12-06T08:19:11,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e1d67c866ee74023bcb580c828870195_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473151131/Put/seqid=0 2024-12-06T08:19:11,193 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a817f6bdc25b4addb692dfee14db392e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473147830 2024-12-06T08:19:11,194 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 34d8cb2ba93c469eb80d215c73b92f9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473147883 2024-12-06T08:19:11,194 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cc083f858554b8ca22df4d3d54b4064, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733473150084 2024-12-06T08:19:11,194 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e2e65591150f47c6a8f2cce307b112f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473150129 2024-12-06T08:19:11,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T08:19:11,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:11,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741977_1153 (size=31093) 2024-12-06T08:19:11,228 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6aa73babdca4a5e99a69e8193c9762e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6aa73babdca4a5e99a69e8193c9762e 2024-12-06T08:19:11,234 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 03df86e7064722e5116b657f067426bf/A of 03df86e7064722e5116b657f067426bf into a6aa73babdca4a5e99a69e8193c9762e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:11,234 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:11,234 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=12, startTime=1733473151070; duration=0sec 2024-12-06T08:19:11,234 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:11,234 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:11,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473211241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473211242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,251 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#132 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:11,251 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/936960218ff24630948998f6ea5ddbe8 is 50, key is test_row_0/C:col10/1733473150129/Put/seqid=0 2024-12-06T08:19:11,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741979_1155 (size=24358) 2024-12-06T08:19:11,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741980_1156 (size=12139) 2024-12-06T08:19:11,291 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:11,301 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e1d67c866ee74023bcb580c828870195_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e1d67c866ee74023bcb580c828870195_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:11,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/936960218ff24630948998f6ea5ddbe8 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/936960218ff24630948998f6ea5ddbe8 2024-12-06T08:19:11,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473211301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,304 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/4dbf173dc850400b974418e20638795c, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:11,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/4dbf173dc850400b974418e20638795c is 175, key is test_row_0/A:col10/1733473151131/Put/seqid=0 2024-12-06T08:19:11,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473211302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473211303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741978_1154 (size=73995) 2024-12-06T08:19:11,314 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into 936960218ff24630948998f6ea5ddbe8(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:11,314 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:11,314 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=12, startTime=1733473151072; duration=0sec 2024-12-06T08:19:11,314 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:11,314 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:11,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473211345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473211346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T08:19:11,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T08:19:11,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:11,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T08:19:11,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:11,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473211549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473211550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T08:19:11,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T08:19:11,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:11,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:11,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,710 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=90, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/4dbf173dc850400b974418e20638795c 2024-12-06T08:19:11,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/af6c72b240fb44f2b1a946b3c989ea6d is 50, key is test_row_0/B:col10/1733473151131/Put/seqid=0 2024-12-06T08:19:11,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741981_1157 (size=12001) 2024-12-06T08:19:11,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/af6c72b240fb44f2b1a946b3c989ea6d 2024-12-06T08:19:11,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/47bc1221bd98479991aa4f9cd4290b87 is 50, key is test_row_0/C:col10/1733473151131/Put/seqid=0 2024-12-06T08:19:11,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741982_1158 (size=12001) 2024-12-06T08:19:11,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/47bc1221bd98479991aa4f9cd4290b87 2024-12-06T08:19:11,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/4dbf173dc850400b974418e20638795c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c 2024-12-06T08:19:11,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c, entries=400, sequenceid=90, filesize=72.3 K 2024-12-06T08:19:11,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 
2024-12-06T08:19:11,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/af6c72b240fb44f2b1a946b3c989ea6d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/af6c72b240fb44f2b1a946b3c989ea6d 2024-12-06T08:19:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:11,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:11,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/af6c72b240fb44f2b1a946b3c989ea6d, entries=150, sequenceid=90, filesize=11.7 K 2024-12-06T08:19:11,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/47bc1221bd98479991aa4f9cd4290b87 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/47bc1221bd98479991aa4f9cd4290b87 2024-12-06T08:19:11,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473211855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473211855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/47bc1221bd98479991aa4f9cd4290b87, entries=150, sequenceid=90, filesize=11.7 K 2024-12-06T08:19:11,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 03df86e7064722e5116b657f067426bf in 712ms, sequenceid=90, compaction requested=false 2024-12-06T08:19:11,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:11,993 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:11,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-06T08:19:11,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:11,995 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-06T08:19:11,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A
2024-12-06T08:19:11,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:19:11,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B
2024-12-06T08:19:11,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:19:11,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C
2024-12-06T08:19:11,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:19:12,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068857c7f46ac34e8092c28b9503e1c2dc_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473151239/Put/seqid=0
2024-12-06T08:19:12,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741983_1159 (size=12154)
2024-12-06T08:19:12,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:12,067 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068857c7f46ac34e8092c28b9503e1c2dc_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068857c7f46ac34e8092c28b9503e1c2dc_03df86e7064722e5116b657f067426bf
2024-12-06T08:19:12,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/7633bb991e034f1cb86b1f6f23abb21b, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf]
2024-12-06T08:19:12,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/7633bb991e034f1cb86b1f6f23abb21b is 175, key is test_row_0/A:col10/1733473151239/Put/seqid=0
2024-12-06T08:19:12,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741984_1160 (size=30955)
2024-12-06T08:19:12,102 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/7633bb991e034f1cb86b1f6f23abb21b
2024-12-06T08:19:12,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/678b6cc540704bc9899b85648d22099e is 50, key is test_row_0/B:col10/1733473151239/Put/seqid=0
2024-12-06T08:19:12,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741985_1161 (size=12001)
2024-12-06T08:19:12,140 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/678b6cc540704bc9899b85648d22099e
2024-12-06T08:19:12,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/01e46483a3f84609a126a88ce1eeb7bd is 50, key is test_row_0/C:col10/1733473151239/Put/seqid=0
2024-12-06T08:19:12,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-12-06T08:19:12,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741986_1162 (size=12001)
2024-12-06T08:19:12,175 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/01e46483a3f84609a126a88ce1eeb7bd
2024-12-06T08:19:12,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/7633bb991e034f1cb86b1f6f23abb21b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b 2024-12-06T08:19:12,194 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b, entries=150, sequenceid=118, filesize=30.2 K 2024-12-06T08:19:12,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/678b6cc540704bc9899b85648d22099e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/678b6cc540704bc9899b85648d22099e 2024-12-06T08:19:12,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,206 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/678b6cc540704bc9899b85648d22099e, entries=150, sequenceid=118, filesize=11.7 K 2024-12-06T08:19:12,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/01e46483a3f84609a126a88ce1eeb7bd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/01e46483a3f84609a126a88ce1eeb7bd 2024-12-06T08:19:12,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,208 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,213 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,214 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/01e46483a3f84609a126a88ce1eeb7bd, entries=150, sequenceid=118, filesize=11.7 K 2024-12-06T08:19:12,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,217 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 03df86e7064722e5116b657f067426bf in 222ms, sequenceid=118, compaction requested=true 2024-12-06T08:19:12,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,217 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:12,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:12,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-06T08:19:12,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-06T08:19:12,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-06T08:19:12,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1680 sec 2024-12-06T08:19:12,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.1740 sec 2024-12-06T08:19:12,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:12,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:12,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:12,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:12,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:12,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:12,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:12,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:12,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d7f29140dd0d4bba8f75b8ad99232055_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:12,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741988_1164 (size=24408) 2024-12-06T08:19:12,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473212504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473212506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473212509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473212510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473212506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473212612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473212613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473212615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473212615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473212617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473212817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473212817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473212817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473212818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473212823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:12,875 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:12,883 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d7f29140dd0d4bba8f75b8ad99232055_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d7f29140dd0d4bba8f75b8ad99232055_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:12,884 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/93d6762f85b44f32913b32e20c97ef17, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:12,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/93d6762f85b44f32913b32e20c97ef17 is 175, key is test_row_0/A:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:12,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741987_1163 (size=74045) 2024-12-06T08:19:13,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473213123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473213123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473213123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473213124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473213124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-06T08:19:13,156 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-06T08:19:13,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-06T08:19:13,165 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:13,165 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:13,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T08:19:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=49 2024-12-06T08:19:13,290 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/93d6762f85b44f32913b32e20c97ef17 2024-12-06T08:19:13,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/b4aa693090a1455da1c139e52ba18ccf is 50, key is test_row_0/B:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:13,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T08:19:13,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:13,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
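The records above show a client-requested flush of TestAcidGuarantees being driven by the master as FlushTableProcedure pid=49 (the earlier flush, procId 47, is reported completed at 08:19:13,156), while the region server keeps rejecting the remote FlushRegionCallable because the region is already flushing. For context, a flush like this is normally requested through the HBase Admin API; the sketch below is a minimal, hypothetical client under assumed configuration, and only the table name is taken from the log.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws IOException {
        // Assumes hbase-site.xml (ZooKeeper quorum etc.) is available on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush the table's regions; in the log above this request
          // surfaces as a FlushTableProcedure (pid=49) with FlushRegionProcedure children.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Calling flush this way only submits the request; as the log shows, the per-region flush procedure can still fail with "Unable to complete flush ... as already flushing" and be re-dispatched until the in-progress flush finishes.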
2024-12-06T08:19:13,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741989_1165 (size=12051) 2024-12-06T08:19:13,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/b4aa693090a1455da1c139e52ba18ccf 2024-12-06T08:19:13,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/b42dfca9ee9446d3b802b689d150878d is 50, key is test_row_0/C:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741990_1166 (size=12051) 2024-12-06T08:19:13,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T08:19:13,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T08:19:13,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:13,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
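Throughout this stretch the handlers reject Mutate calls with org.apache.hadoop.hbase.RegionTooBusyException ("Over memstore limit=512.0 K") while the memstore flush for 03df86e7064722e5116b657f067426bf is still in progress. The sketch below is a hypothetical writer that backs off and retries such a put explicitly; the table name, row key, and column A:col10 come from the log, while the value, retry count, and backoff are made up, and the stock HBase client would normally retry a retryable exception like this internally before it ever reaches application code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row key and column A:col10 are taken from the flush output above; the value is made up.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException e) {
              // Region is over its memstore blocking limit (a flush is still running);
              // back off briefly and retry a bounded number of times before giving up.
              if (++attempts >= 5) {
                throw e;
              }
              Thread.sleep(200L * attempts);
            }
          }
        }
      }
    }

The blocking threshold itself is governed by hbase.hregion.memstore.flush.size together with hbase.hregion.memstore.block.multiplier; the 512.0 K limit seen here suggests the test configures it deliberately small so that writers hit the limit quickly.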
2024-12-06T08:19:13,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T08:19:13,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:13,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473213628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473213630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473213630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473213630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473213631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T08:19:13,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T08:19:13,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:13,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:13,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/b42dfca9ee9446d3b802b689d150878d 2024-12-06T08:19:13,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/93d6762f85b44f32913b32e20c97ef17 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17 2024-12-06T08:19:13,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17, entries=400, sequenceid=129, filesize=72.3 K 2024-12-06T08:19:13,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/b4aa693090a1455da1c139e52ba18ccf as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/b4aa693090a1455da1c139e52ba18ccf 2024-12-06T08:19:13,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/b4aa693090a1455da1c139e52ba18ccf, entries=150, 
sequenceid=129, filesize=11.8 K 2024-12-06T08:19:13,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/b42dfca9ee9446d3b802b689d150878d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b42dfca9ee9446d3b802b689d150878d 2024-12-06T08:19:13,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b42dfca9ee9446d3b802b689d150878d, entries=150, sequenceid=129, filesize=11.8 K 2024-12-06T08:19:13,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 03df86e7064722e5116b657f067426bf in 1486ms, sequenceid=129, compaction requested=true 2024-12-06T08:19:13,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:13,868 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:13,869 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 210088 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:13,870 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:13,870 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,870 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6aa73babdca4a5e99a69e8193c9762e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=205.2 K 2024-12-06T08:19:13,870 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:13,870 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6aa73babdca4a5e99a69e8193c9762e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17] 2024-12-06T08:19:13,871 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6aa73babdca4a5e99a69e8193c9762e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473150129 2024-12-06T08:19:13,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:13,871 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4dbf173dc850400b974418e20638795c, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733473150814 2024-12-06T08:19:13,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:13,872 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:13,872 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7633bb991e034f1cb86b1f6f23abb21b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733473151231 2024-12-06T08:19:13,873 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93d6762f85b44f32913b32e20c97ef17, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473152365 2024-12-06T08:19:13,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:13,874 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:13,874 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:13,874 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:13,874 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f12e31e0e49b4d018c3134044ad99e17, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/af6c72b240fb44f2b1a946b3c989ea6d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/678b6cc540704bc9899b85648d22099e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/b4aa693090a1455da1c139e52ba18ccf] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=47.1 K 2024-12-06T08:19:13,875 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f12e31e0e49b4d018c3134044ad99e17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473150129 2024-12-06T08:19:13,876 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting af6c72b240fb44f2b1a946b3c989ea6d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733473151131 2024-12-06T08:19:13,876 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 678b6cc540704bc9899b85648d22099e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733473151231 2024-12-06T08:19:13,877 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b4aa693090a1455da1c139e52ba18ccf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473152375 2024-12-06T08:19:13,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:13,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:13,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:13,885 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:13,899 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#B#compaction#142 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:13,899 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/ef413efdcc7b44f89bba13441edbc475 is 50, key is test_row_0/B:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:13,905 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206f7d6d82fdd5d459f8e66d6bbba4081de_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:13,909 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206f7d6d82fdd5d459f8e66d6bbba4081de_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:13,910 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f7d6d82fdd5d459f8e66d6bbba4081de_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:13,931 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:13,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-06T08:19:13,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:13,932 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:19:13,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:13,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:13,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:13,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:13,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:13,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:13,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741992_1168 (size=4469) 2024-12-06T08:19:13,941 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#A#compaction#141 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:13,942 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/28815eb73bbc4ca6915eec6d0fdf65b6 is 175, key is test_row_0/A:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:13,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741991_1167 (size=12325) 2024-12-06T08:19:13,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206051a0de44a704d0c99131764d3a4cbb8_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473152507/Put/seqid=0 2024-12-06T08:19:13,953 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/ef413efdcc7b44f89bba13441edbc475 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/ef413efdcc7b44f89bba13441edbc475 2024-12-06T08:19:13,958 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into ef413efdcc7b44f89bba13441edbc475(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:13,958 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:13,958 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=12, startTime=1733473153872; duration=0sec 2024-12-06T08:19:13,959 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:13,959 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:13,959 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:13,961 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:13,961 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:13,961 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:13,961 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/936960218ff24630948998f6ea5ddbe8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/47bc1221bd98479991aa4f9cd4290b87, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/01e46483a3f84609a126a88ce1eeb7bd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b42dfca9ee9446d3b802b689d150878d] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=47.1 K 2024-12-06T08:19:13,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741993_1169 (size=31279) 2024-12-06T08:19:13,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741994_1170 (size=12304) 2024-12-06T08:19:13,963 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 936960218ff24630948998f6ea5ddbe8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473150129 2024-12-06T08:19:13,964 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 
47bc1221bd98479991aa4f9cd4290b87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733473151131 2024-12-06T08:19:13,965 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 01e46483a3f84609a126a88ce1eeb7bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733473151231 2024-12-06T08:19:13,965 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b42dfca9ee9446d3b802b689d150878d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473152375 2024-12-06T08:19:13,977 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#144 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:13,977 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/5ed7c25a8ad54d3080bb64ca407ad67f is 50, key is test_row_0/C:col10/1733473152375/Put/seqid=0 2024-12-06T08:19:13,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741995_1171 (size=12325) 2024-12-06T08:19:13,999 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/5ed7c25a8ad54d3080bb64ca407ad67f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/5ed7c25a8ad54d3080bb64ca407ad67f 2024-12-06T08:19:14,005 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into 5ed7c25a8ad54d3080bb64ca407ad67f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:14,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:14,005 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=12, startTime=1733473153879; duration=0sec 2024-12-06T08:19:14,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:14,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:14,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T08:19:14,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:14,370 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/28815eb73bbc4ca6915eec6d0fdf65b6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/28815eb73bbc4ca6915eec6d0fdf65b6 2024-12-06T08:19:14,371 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206051a0de44a704d0c99131764d3a4cbb8_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206051a0de44a704d0c99131764d3a4cbb8_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:14,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/943b8887e64a4f358a6e5a0e221cc1d7, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:14,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/943b8887e64a4f358a6e5a0e221cc1d7 is 175, key is test_row_0/A:col10/1733473152507/Put/seqid=0 2024-12-06T08:19:14,382 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 03df86e7064722e5116b657f067426bf/A of 
03df86e7064722e5116b657f067426bf into 28815eb73bbc4ca6915eec6d0fdf65b6(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:14,382 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:14,382 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=12, startTime=1733473153867; duration=0sec 2024-12-06T08:19:14,382 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:14,383 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:14,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741996_1172 (size=31105) 2024-12-06T08:19:14,410 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=154, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/943b8887e64a4f358a6e5a0e221cc1d7 2024-12-06T08:19:14,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e6b3909475a246d4a2d8d07d5385f292 is 50, key is test_row_0/B:col10/1733473152507/Put/seqid=0 2024-12-06T08:19:14,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741997_1173 (size=12151) 2024-12-06T08:19:14,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:14,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473214642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473214646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473214649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473214649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473214651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473214749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473214754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473214755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473214756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:14,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473214758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:14,860 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e6b3909475a246d4a2d8d07d5385f292 2024-12-06T08:19:14,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/87cfd7645c9b4f7180817716b5956063 is 50, key is test_row_0/C:col10/1733473152507/Put/seqid=0 2024-12-06T08:19:14,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741998_1174 (size=12151) 2024-12-06T08:19:14,905 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/87cfd7645c9b4f7180817716b5956063 2024-12-06T08:19:14,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/943b8887e64a4f358a6e5a0e221cc1d7 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7 2024-12-06T08:19:14,924 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7, entries=150, sequenceid=154, filesize=30.4 K 2024-12-06T08:19:14,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e6b3909475a246d4a2d8d07d5385f292 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e6b3909475a246d4a2d8d07d5385f292 2024-12-06T08:19:14,931 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e6b3909475a246d4a2d8d07d5385f292, entries=150, sequenceid=154, filesize=11.9 K 2024-12-06T08:19:14,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/87cfd7645c9b4f7180817716b5956063 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/87cfd7645c9b4f7180817716b5956063 2024-12-06T08:19:14,941 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/87cfd7645c9b4f7180817716b5956063, entries=150, sequenceid=154, filesize=11.9 K 2024-12-06T08:19:14,945 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 03df86e7064722e5116b657f067426bf in 1013ms, sequenceid=154, compaction requested=false 2024-12-06T08:19:14,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:14,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:14,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-06T08:19:14,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-06T08:19:14,947 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-06T08:19:14,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7810 sec 2024-12-06T08:19:14,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.7850 sec 2024-12-06T08:19:14,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T08:19:14,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:14,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:14,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:14,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:14,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:14,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:14,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:14,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206fe23a7909bc448edbc5fed1baff7e7ca_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:14,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741999_1175 (size=14794) 2024-12-06T08:19:14,984 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:14,988 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206fe23a7909bc448edbc5fed1baff7e7ca_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206fe23a7909bc448edbc5fed1baff7e7ca_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:14,989 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/0868e4ae11c049038b6cf660a03ba94f, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:14,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/0868e4ae11c049038b6cf660a03ba94f is 175, key is test_row_0/A:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:14,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742000_1176 (size=39749) 2024-12-06T08:19:15,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473215015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473215017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473215018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473215019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473215020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473215121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473215122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473215122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473215122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473215131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-06T08:19:15,271 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-06T08:19:15,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:15,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-06T08:19:15,274 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:15,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T08:19:15,275 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:15,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:15,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473215323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473215325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473215326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473215329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473215335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T08:19:15,400 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/0868e4ae11c049038b6cf660a03ba94f 2024-12-06T08:19:15,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/a684d831c1ac4fa0bba715f9203ae27a is 50, key is test_row_0/B:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:15,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742001_1177 (size=12151) 2024-12-06T08:19:15,426 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:15,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:15,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:15,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:15,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:15,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T08:19:15,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:15,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:15,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:15,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:15,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
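On the caller's side, the recurring "Checking to see if procedure is done pid=51" lines correspond to a client waiting for the table flush procedure to complete. A sketch using the public HBase Admin API is shown below; the table name comes from the log, while the connection setup (hbase-site.xml on the classpath) is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes cluster config on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Admin.flush is a synchronous operation: it returns once the master reports the
            // flush procedure done, which is what the repeated "is procedure done" polling reflects.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```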
2024-12-06T08:19:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473215625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473215630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473215630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473215630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:15,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473215638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,646 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T08:19:15,646 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T08:19:15,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:15,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:15,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:15,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
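The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back on writers until the in-flight flush drains the memstore. A hedged client-side sketch of retrying such a write with backoff follows; the row, family, and qualifier are taken from the log, the value is made up, and depending on client retry settings the exception may surface wrapped rather than directly as shown here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes a reachable cluster
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {
                        throw e; // give up after a few attempts
                    }
                    Thread.sleep(backoffMs); // wait for the memstore to drain below the limit
                    backoffMs *= 2;
                }
            }
        }
    }
}
```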
2024-12-06T08:19:15,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/a684d831c1ac4fa0bba715f9203ae27a 2024-12-06T08:19:15,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/2ca65c4b99e84a57ab461e39e2d98975 is 50, key is test_row_0/C:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:15,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742002_1178 (size=12151) 2024-12-06T08:19:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T08:19:15,886 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:15,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:15,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:15,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
as already flushing 2024-12-06T08:19:15,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:15,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:15,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,039 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:16,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:16,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:16,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473216128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:16,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473216137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:16,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473216137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:16,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473216137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:16,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473216142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:16,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/2ca65c4b99e84a57ab461e39e2d98975 2024-12-06T08:19:16,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/0868e4ae11c049038b6cf660a03ba94f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f 2024-12-06T08:19:16,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f, entries=200, sequenceid=171, filesize=38.8 K 2024-12-06T08:19:16,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/a684d831c1ac4fa0bba715f9203ae27a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a684d831c1ac4fa0bba715f9203ae27a 2024-12-06T08:19:16,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a684d831c1ac4fa0bba715f9203ae27a, entries=150, 
sequenceid=171, filesize=11.9 K 2024-12-06T08:19:16,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/2ca65c4b99e84a57ab461e39e2d98975 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/2ca65c4b99e84a57ab461e39e2d98975 2024-12-06T08:19:16,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/2ca65c4b99e84a57ab461e39e2d98975, entries=150, sequenceid=171, filesize=11.9 K 2024-12-06T08:19:16,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 03df86e7064722e5116b657f067426bf in 1298ms, sequenceid=171, compaction requested=true 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:16,257 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:16,257 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:16,258 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102133 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:16,258 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:16,259 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in 
TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,259 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/28815eb73bbc4ca6915eec6d0fdf65b6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=99.7 K 2024-12-06T08:19:16,259 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,259 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/28815eb73bbc4ca6915eec6d0fdf65b6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f] 2024-12-06T08:19:16,259 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:16,259 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:16,259 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
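The "Exploring compaction algorithm has selected 3 files ..." lines above reflect ratio-based selection of store files for a minor compaction. The sketch below is a heavily simplified, standalone illustration of that kind of size-ratio check, not HBase's ExploringCompactionPolicy (which also honours min/max file counts and evaluates many permutations); the file sizes are rounded approximations of the A-family files in the log, and 1.2 is the commonly cited default compaction ratio.

```java
import java.util.List;

// Simplified sketch of ratio-based minor-compaction candidate checking.
class CompactionSelectionSketch {
    /** Returns true if every file is at most `ratio` times the combined size of the others. */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false; // one file dominates; a minor compaction would mostly rewrite it
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes (bytes) of the three A-family HFiles from the log (~30.5 K, ~30.4 K, ~38.8 K).
        List<Long> candidate = List.of(31_200L, 31_100L, 39_700L);
        System.out.println("eligible=" + withinRatio(candidate, 1.2));
    }
}
```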
2024-12-06T08:19:16,259 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/ef413efdcc7b44f89bba13441edbc475, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e6b3909475a246d4a2d8d07d5385f292, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a684d831c1ac4fa0bba715f9203ae27a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=35.8 K 2024-12-06T08:19:16,260 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28815eb73bbc4ca6915eec6d0fdf65b6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473152375 2024-12-06T08:19:16,260 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ef413efdcc7b44f89bba13441edbc475, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473152375 2024-12-06T08:19:16,260 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 943b8887e64a4f358a6e5a0e221cc1d7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733473152503 2024-12-06T08:19:16,260 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e6b3909475a246d4a2d8d07d5385f292, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733473152503 2024-12-06T08:19:16,261 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a684d831c1ac4fa0bba715f9203ae27a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733473154645 2024-12-06T08:19:16,261 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0868e4ae11c049038b6cf660a03ba94f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733473154645 2024-12-06T08:19:16,272 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#B#compaction#150 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:16,272 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/a7d03ce79203462986c0e115ab9f0086 is 50, key is test_row_0/B:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:16,275 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:16,277 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120607683c4c9f504cd38b57c540704c2676_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:16,279 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120607683c4c9f504cd38b57c540704c2676_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:16,280 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120607683c4c9f504cd38b57c540704c2676_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:16,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742004_1180 (size=4469) 2024-12-06T08:19:16,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742003_1179 (size=12527) 2024-12-06T08:19:16,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:16,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-06T08:19:16,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:16,347 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:19:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:16,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120619d41f1081154f219f680650b057f2d2_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473155018/Put/seqid=0 2024-12-06T08:19:16,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742005_1181 (size=12304) 2024-12-06T08:19:16,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T08:19:16,378 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120619d41f1081154f219f680650b057f2d2_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120619d41f1081154f219f680650b057f2d2_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:16,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ac0b6fa898d34c099d56ae1bd75880c2, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:16,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ac0b6fa898d34c099d56ae1bd75880c2 is 175, key is test_row_0/A:col10/1733473155018/Put/seqid=0 2024-12-06T08:19:16,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742006_1182 (size=31105) 2024-12-06T08:19:16,401 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ac0b6fa898d34c099d56ae1bd75880c2 2024-12-06T08:19:16,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/c8b6596e96b74d3f8a883938c59b1638 is 50, key is test_row_0/B:col10/1733473155018/Put/seqid=0 2024-12-06T08:19:16,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742007_1183 (size=12151) 2024-12-06T08:19:16,706 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#A#compaction#151 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:16,706 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/910e37d4c48842dfb257f4963268db2b is 175, key is test_row_0/A:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742008_1184 (size=31481) 2024-12-06T08:19:16,719 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/a7d03ce79203462986c0e115ab9f0086 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a7d03ce79203462986c0e115ab9f0086 2024-12-06T08:19:16,725 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/910e37d4c48842dfb257f4963268db2b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/910e37d4c48842dfb257f4963268db2b 2024-12-06T08:19:16,726 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into a7d03ce79203462986c0e115ab9f0086(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:16,726 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:16,726 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=13, startTime=1733473156257; duration=0sec 2024-12-06T08:19:16,727 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:16,727 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:16,727 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:16,729 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:16,729 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:16,729 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:16,729 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/5ed7c25a8ad54d3080bb64ca407ad67f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/87cfd7645c9b4f7180817716b5956063, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/2ca65c4b99e84a57ab461e39e2d98975] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=35.8 K 2024-12-06T08:19:16,730 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ed7c25a8ad54d3080bb64ca407ad67f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473152375 2024-12-06T08:19:16,731 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 87cfd7645c9b4f7180817716b5956063, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733473152503 2024-12-06T08:19:16,731 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ca65c4b99e84a57ab461e39e2d98975, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733473154645 2024-12-06T08:19:16,733 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 03df86e7064722e5116b657f067426bf/A of 03df86e7064722e5116b657f067426bf into 910e37d4c48842dfb257f4963268db2b(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:16,733 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:16,733 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=13, startTime=1733473156257; duration=0sec 2024-12-06T08:19:16,733 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:16,734 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:16,767 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#154 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:16,769 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/eeaa7455a3894205a124972ac3f40efe is 50, key is test_row_0/C:col10/1733473154954/Put/seqid=0 2024-12-06T08:19:16,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742009_1185 (size=12527) 2024-12-06T08:19:16,801 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/eeaa7455a3894205a124972ac3f40efe as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/eeaa7455a3894205a124972ac3f40efe 2024-12-06T08:19:16,809 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into eeaa7455a3894205a124972ac3f40efe(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:16,809 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:16,809 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=13, startTime=1733473156257; duration=0sec 2024-12-06T08:19:16,809 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:16,809 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:16,829 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/c8b6596e96b74d3f8a883938c59b1638 2024-12-06T08:19:16,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/52bd9bba7ffb48589e936d4eaf228494 is 50, key is test_row_0/C:col10/1733473155018/Put/seqid=0 2024-12-06T08:19:16,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742010_1186 (size=12151) 2024-12-06T08:19:16,849 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/52bd9bba7ffb48589e936d4eaf228494 2024-12-06T08:19:16,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/ac0b6fa898d34c099d56ae1bd75880c2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2 2024-12-06T08:19:16,862 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2, entries=150, sequenceid=194, filesize=30.4 K 2024-12-06T08:19:16,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/c8b6596e96b74d3f8a883938c59b1638 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/c8b6596e96b74d3f8a883938c59b1638 2024-12-06T08:19:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,870 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/c8b6596e96b74d3f8a883938c59b1638, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T08:19:16,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:19:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/52bd9bba7ffb48589e936d4eaf228494 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/52bd9bba7ffb48589e936d4eaf228494 2024-12-06T08:19:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,880 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/52bd9bba7ffb48589e936d4eaf228494, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T08:19:16,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,881 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 03df86e7064722e5116b657f067426bf in 535ms, sequenceid=194, compaction requested=false 2024-12-06T08:19:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-06T08:19:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-06T08:19:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-06T08:19:16,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6070 sec 2024-12-06T08:19:16,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,886 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.6130 sec 2024-12-06T08:19:16,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,890 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,894 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,897 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,901 DEBUG 
...
2024-12-06T08:19:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:19:16,983 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,987 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,992 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,995 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:16,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,005 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,009 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,013 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,016 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,020 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,023 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,026 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,029 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,032 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker"), emitted repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 38041 between 2024-12-06T08:19:17,032 and 2024-12-06T08:19:17,116, elided ...]
2024-12-06T08:19:17,117 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,120 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,132 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,136 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,141 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,148 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,153 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,156 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,159 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,162 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,168 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,173 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,176 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,181 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,187 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:17,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:17,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:17,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:17,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:17,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:17,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,193 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,199 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,206 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206049b47e691684b07bfb04abace130470_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:17,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742012_1188 (size=27248) 2024-12-06T08:19:17,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,244 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,249 INFO [MemStoreFlusher.0 
{}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206049b47e691684b07bfb04abace130470_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206049b47e691684b07bfb04abace130470_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:17,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,251 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6c10d1a2f3c4aed99d3c0f877a250ee, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:17,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6c10d1a2f3c4aed99d3c0f877a250ee is 175, key is test_row_0/A:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:17,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742011_1187 (size=83035) 2024-12-06T08:19:17,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473217241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473217244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473217244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473217245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473217242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473217356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473217357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473217357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473217358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473217361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-06T08:19:17,379 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-06T08:19:17,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:17,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-06T08:19:17,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T08:19:17,382 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:17,383 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:17,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:17,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=53 2024-12-06T08:19:17,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T08:19:17,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
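The flush procedures above (pid=51 already completed, pid=53/54 now being dispatched to b6b797fc3981,38041) are driven by client-side flush requests against TestAcidGuarantees. A minimal sketch, assuming a local client configuration, of how such a flush is requested through the public Admin API (the class name is hypothetical and this is not the test's own code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a flush, which it executes as a FlushTableProcedure with
          // FlushRegionProcedure children (the pid=53/54 entries above); the caller is blocked
          // until the master reports the procedure done.
          admin.flush(table);
        }
      }
    }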
2024-12-06T08:19:17,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473217560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473217560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473217560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473217561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473217563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,655 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6c10d1a2f3c4aed99d3c0f877a250ee 2024-12-06T08:19:17,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/9337f0c7de6a46778525e8eafa4d41bd is 50, key is test_row_0/B:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:17,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742013_1189 (size=12151) 2024-12-06T08:19:17,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T08:19:17,689 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T08:19:17,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
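The repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks writes once a region's memstore exceeds its blocking size, i.e. the configured flush size multiplied by the block multiplier. The exact settings used by this test run are not visible in this excerpt; the sketch below only illustrates the two standard properties involved, with made-up values chosen so that 128 KB x 4 = 512 KB:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush size; illustrative 128 KB (the stock default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Blocking multiplier: puts are rejected with RegionTooBusyException once the memstore
        // exceeds flush.size * multiplier (128 KB * 4 = 512 KB with these illustrative values).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit = " + blockingLimit + " bytes"); // 524288
      }
    }

Once a flush drains the memstore back under that limit, blocked mutations are accepted again, which is what happens further down once the flush of sequenceid=209 completes.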
2024-12-06T08:19:17,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:17,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
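Each rejected Mutate call surfaces to the writer as a RegionTooBusyException, which the stock HBase client retries on its own; whether the exception reaches application code directly or wrapped in a retries-exhausted exception depends on the client's retry settings. Purely as an illustration (hypothetical row, value and back-off, not the test's code), an explicit retry loop around a put would look roughly like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table t = conn.getTable(table)) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              t.put(put);
              break; // accepted once the memstore drops back below the blocking limit
            } catch (RegionTooBusyException busy) {
              // Region memstore is over its blocking limit; give the flush time to finish.
              Thread.sleep(100L * (attempt + 1));
            }
          }
        }
      }
    }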
2024-12-06T08:19:17,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,843 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T08:19:17,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
as already flushing 2024-12-06T08:19:17,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473217863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473217864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473217864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473217864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:17,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473217871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T08:19:17,996 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:17,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T08:19:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:17,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:17,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:17,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:18,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/9337f0c7de6a46778525e8eafa4d41bd 2024-12-06T08:19:18,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/4b3b486a702c4a48926031c075798230 is 50, key is test_row_0/C:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742014_1190 (size=12151) 2024-12-06T08:19:18,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/4b3b486a702c4a48926031c075798230 2024-12-06T08:19:18,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6c10d1a2f3c4aed99d3c0f877a250ee as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee 2024-12-06T08:19:18,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee, entries=450, sequenceid=209, filesize=81.1 K 2024-12-06T08:19:18,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/9337f0c7de6a46778525e8eafa4d41bd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/9337f0c7de6a46778525e8eafa4d41bd 2024-12-06T08:19:18,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/9337f0c7de6a46778525e8eafa4d41bd, entries=150, sequenceid=209, filesize=11.9 K 2024-12-06T08:19:18,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/4b3b486a702c4a48926031c075798230 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/4b3b486a702c4a48926031c075798230 2024-12-06T08:19:18,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/4b3b486a702c4a48926031c075798230, entries=150, sequenceid=209, filesize=11.9 K 2024-12-06T08:19:18,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 03df86e7064722e5116b657f067426bf in 958ms, sequenceid=209, compaction requested=true 2024-12-06T08:19:18,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:18,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:18,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:18,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:18,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:18,149 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:18,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact 
mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:18,149 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:18,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:18,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-06T08:19:18,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:18,150 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T08:19:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:18,151 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:18,151 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:18,151 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
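Family A of this table is flushed and compacted through DefaultMobStoreFlusher and DefaultMobStoreCompactor, i.e. it is a MOB-enabled column family, while B and C go through the default store flusher. A sketch of declaring such a schema through the public client API follows; the MOB threshold and the table-creation call are illustrative assumptions, not values taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobSchemaSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Family A: MOB-enabled, so flushes/compactions take the MOB store code paths seen
          // above (DefaultMobStoreFlusher, DefaultMobStoreCompactor). Threshold is illustrative.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(4 * 1024L)
              .build());
          // Families B and C: plain column families using the default store flusher.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }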
2024-12-06T08:19:18,151 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a7d03ce79203462986c0e115ab9f0086, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/c8b6596e96b74d3f8a883938c59b1638, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/9337f0c7de6a46778525e8eafa4d41bd] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.0 K 2024-12-06T08:19:18,151 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 145621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:18,151 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:18,151 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:18,151 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/910e37d4c48842dfb257f4963268db2b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=142.2 K 2024-12-06T08:19:18,152 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:18,152 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/910e37d4c48842dfb257f4963268db2b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee] 2024-12-06T08:19:18,152 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 910e37d4c48842dfb257f4963268db2b, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733473154645 2024-12-06T08:19:18,152 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a7d03ce79203462986c0e115ab9f0086, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733473154645 2024-12-06T08:19:18,153 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c8b6596e96b74d3f8a883938c59b1638, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733473155017 2024-12-06T08:19:18,153 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac0b6fa898d34c099d56ae1bd75880c2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733473155017 2024-12-06T08:19:18,154 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9337f0c7de6a46778525e8eafa4d41bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733473157179 2024-12-06T08:19:18,154 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6c10d1a2f3c4aed99d3c0f877a250ee, keycount=450, bloomtype=ROW, size=81.1 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733473157167 2024-12-06T08:19:18,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ad6ecff5230d4b55826c3a8df9fa3897_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473157240/Put/seqid=0 2024-12-06T08:19:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742015_1191 (size=12304) 2024-12-06T08:19:18,197 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#B#compaction#160 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:18,198 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/05f5a2a76d354276ad440b55f279ea6e is 50, key is test_row_0/B:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:18,201 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:18,209 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206244b66eacb084115bc8994e1b26bd4f4_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:18,211 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206244b66eacb084115bc8994e1b26bd4f4_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:18,212 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206244b66eacb084115bc8994e1b26bd4f4_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742016_1192 (size=12629) 2024-12-06T08:19:18,237 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/05f5a2a76d354276ad440b55f279ea6e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/05f5a2a76d354276ad440b55f279ea6e 2024-12-06T08:19:18,245 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into 05f5a2a76d354276ad440b55f279ea6e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
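[Editor's note, not part of the captured log] The entries above show the ExploringCompactionPolicy selecting three store files of 03df86e7064722e5116b657f067426bf for families A and B and the PressureAwareThroughputController capping the rewrite at 50.00 MB/second. For reference only, the same kind of compaction can be requested and observed from a client through the public Admin API. The sketch below is illustrative, is not taken from this test run, reuses the TestAcidGuarantees table and family B names from the log, and assumes a reachable cluster whose hbase-site.xml is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    // Cluster settings are read from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask the region servers to compact column family B, comparable to the
      // 03df86e7.../B minor compaction logged above.
      admin.compact(table, Bytes.toBytes("B"));
      // Compactions run asynchronously on the region servers; poll the
      // aggregate compaction state until no compaction is in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}
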
2024-12-06T08:19:18,245 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:18,245 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=13, startTime=1733473158149; duration=0sec 2024-12-06T08:19:18,245 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:18,245 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:18,246 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:18,248 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:18,248 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:18,248 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:18,248 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/eeaa7455a3894205a124972ac3f40efe, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/52bd9bba7ffb48589e936d4eaf228494, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/4b3b486a702c4a48926031c075798230] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.0 K 2024-12-06T08:19:18,249 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting eeaa7455a3894205a124972ac3f40efe, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733473154645 2024-12-06T08:19:18,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742017_1193 (size=4469) 2024-12-06T08:19:18,249 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 52bd9bba7ffb48589e936d4eaf228494, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733473155017 2024-12-06T08:19:18,250 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b3b486a702c4a48926031c075798230, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=209, earliestPutTs=1733473157179 2024-12-06T08:19:18,250 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#A#compaction#161 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:18,251 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/762d70f596364456a055548dc87ecaa0 is 175, key is test_row_0/A:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:18,267 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#162 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:18,267 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/b1efcb73ba1544c28a3984ce3cd1f3f5 is 50, key is test_row_0/C:col10/1733473157190/Put/seqid=0 2024-12-06T08:19:18,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742018_1194 (size=31583) 2024-12-06T08:19:18,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742019_1195 (size=12629) 2024-12-06T08:19:18,274 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/762d70f596364456a055548dc87ecaa0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/762d70f596364456a055548dc87ecaa0 2024-12-06T08:19:18,280 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/A of 03df86e7064722e5116b657f067426bf into 762d70f596364456a055548dc87ecaa0(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
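[Editor's note, not part of the captured log] The RS_FLUSH_OPERATIONS entries above (event_type=RS_FLUSH_REGIONS, pid=54) belong to a master-driven flush of this region; the flushed files for each column family are written under the region's .tmp directory and then committed into the store. For reference only, an equivalent flush can be triggered from a client with the Admin API. The sketch below is illustrative, is not part of this test, reuses the TestAcidGuarantees table name from the log, and assumes a reachable cluster configuration on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    // Cluster settings are read from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush the memstores of every region of the table to store files.
      // Depending on the HBase version this is carried out by a master
      // procedure with per-region flush subprocedures, as in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
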
2024-12-06T08:19:18,280 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:18,280 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=13, startTime=1733473158149; duration=0sec 2024-12-06T08:19:18,280 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:18,280 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:18,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:18,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473218375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473218375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473218375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473218378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473218379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473218480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473218480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473218481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473218481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T08:19:18,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:18,601 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ad6ecff5230d4b55826c3a8df9fa3897_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ad6ecff5230d4b55826c3a8df9fa3897_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:18,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c317c1afd5904d01a20abb682e05e26b, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:18,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c317c1afd5904d01a20abb682e05e26b is 175, key is test_row_0/A:col10/1733473157240/Put/seqid=0 2024-12-06T08:19:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742020_1196 (size=31105) 2024-12-06T08:19:18,682 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/b1efcb73ba1544c28a3984ce3cd1f3f5 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b1efcb73ba1544c28a3984ce3cd1f3f5 2024-12-06T08:19:18,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473218682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473218684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473218684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473218685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,690 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into b1efcb73ba1544c28a3984ce3cd1f3f5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:18,690 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:18,690 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=13, startTime=1733473158149; duration=0sec 2024-12-06T08:19:18,690 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:18,690 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:18,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473218987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473218989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473218988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:18,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473218988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:19,008 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c317c1afd5904d01a20abb682e05e26b 2024-12-06T08:19:19,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/5ca61cbd84104b57b2df10b576e56c83 is 50, key is test_row_0/B:col10/1733473157240/Put/seqid=0 2024-12-06T08:19:19,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742021_1197 (size=12151) 2024-12-06T08:19:19,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473219381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:19,424 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/5ca61cbd84104b57b2df10b576e56c83 2024-12-06T08:19:19,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/dbd1268b7d094e8bae956fd3ce7667bd is 50, key is test_row_0/C:col10/1733473157240/Put/seqid=0 2024-12-06T08:19:19,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742022_1198 (size=12151) 2024-12-06T08:19:19,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T08:19:19,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:19,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:19,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473219494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:19,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:19,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473219494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:19,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473219493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:19,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:19,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473219497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:19,876 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/dbd1268b7d094e8bae956fd3ce7667bd 2024-12-06T08:19:19,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c317c1afd5904d01a20abb682e05e26b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b 2024-12-06T08:19:19,887 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b, entries=150, sequenceid=233, filesize=30.4 K 2024-12-06T08:19:19,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/5ca61cbd84104b57b2df10b576e56c83 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5ca61cbd84104b57b2df10b576e56c83 2024-12-06T08:19:19,897 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5ca61cbd84104b57b2df10b576e56c83, entries=150, sequenceid=233, filesize=11.9 K 2024-12-06T08:19:19,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/dbd1268b7d094e8bae956fd3ce7667bd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dbd1268b7d094e8bae956fd3ce7667bd 2024-12-06T08:19:19,904 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dbd1268b7d094e8bae956fd3ce7667bd, entries=150, sequenceid=233, filesize=11.9 K 2024-12-06T08:19:19,905 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 03df86e7064722e5116b657f067426bf in 1756ms, sequenceid=233, compaction requested=false 2024-12-06T08:19:19,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:19,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:19,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-06T08:19:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-06T08:19:19,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-06T08:19:19,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5250 sec 2024-12-06T08:19:19,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.5300 sec 2024-12-06T08:19:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:20,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T08:19:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:20,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:20,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120619f92fcd6008410f9af73c2607b248ba_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:20,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742023_1199 (size=12304) 2024-12-06T08:19:20,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473220522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473220522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473220524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473220527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473220642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473220642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473220643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473220644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473220844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473220846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473220846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:20,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473220846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:20,924 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:20,929 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120619f92fcd6008410f9af73c2607b248ba_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120619f92fcd6008410f9af73c2607b248ba_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:20,930 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/e097be9998614a3988353e90f8c3c090, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:20,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/e097be9998614a3988353e90f8c3c090 is 175, key is test_row_0/A:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:20,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742024_1200 (size=31105) 2024-12-06T08:19:21,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473221149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473221150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473221150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473221154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,339 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/e097be9998614a3988353e90f8c3c090 2024-12-06T08:19:21,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/139fb4e990ca408c979d631a0e4714e2 is 50, key is test_row_0/B:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:21,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473221383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,385 DEBUG [Thread-664 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:21,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742025_1201 (size=12151) 2024-12-06T08:19:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-06T08:19:21,489 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-06T08:19:21,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-12-06T08:19:21,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-06T08:19:21,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T08:19:21,493 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:21,495 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:21,495 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:21,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T08:19:21,648 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T08:19:21,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:21,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:21,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:21,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:21,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473221655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473221664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473221665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:21,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473221665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/139fb4e990ca408c979d631a0e4714e2 2024-12-06T08:19:21,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T08:19:21,802 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T08:19:21,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:21,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:21,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:21,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/dd284e5f78904452bc7fdc89f65bba11 is 50, key is test_row_0/C:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:21,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742026_1202 (size=12151) 2024-12-06T08:19:21,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/dd284e5f78904452bc7fdc89f65bba11 2024-12-06T08:19:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/e097be9998614a3988353e90f8c3c090 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090 2024-12-06T08:19:21,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090, entries=150, sequenceid=250, filesize=30.4 K 2024-12-06T08:19:21,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/139fb4e990ca408c979d631a0e4714e2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/139fb4e990ca408c979d631a0e4714e2 2024-12-06T08:19:21,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/139fb4e990ca408c979d631a0e4714e2, entries=150, sequenceid=250, filesize=11.9 K 2024-12-06T08:19:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/dd284e5f78904452bc7fdc89f65bba11 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dd284e5f78904452bc7fdc89f65bba11 2024-12-06T08:19:21,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dd284e5f78904452bc7fdc89f65bba11, entries=150, sequenceid=250, filesize=11.9 K 2024-12-06T08:19:21,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 03df86e7064722e5116b657f067426bf in 1368ms, sequenceid=250, compaction requested=true 2024-12-06T08:19:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:21,870 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:21,872 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:21,872 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:21,872 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
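Note: the "Exploring compaction algorithm has selected 3 files ... after considering 1 permutations" and "3 eligible, 16 blocking" entries above come from the store-file selection step of minor compaction. As a hedged, illustrative sketch only (the test's actual settings are not visible in this log; the values below are assumptions), these are the standard configuration keys that selection consults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: knobs consulted when the compaction policy "selects N files
// ... after considering M permutations", as logged above. Values are hypothetical.
public class CompactionSelectionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files pulled into one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Controls how aggressively larger, older files are included in minor compactions.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store-file count at which writes to the region are blocked
    // (the "16 blocking" figure reported by SortedCompactionPolicy above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}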
2024-12-06T08:19:21,872 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/762d70f596364456a055548dc87ecaa0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=91.6 K 2024-12-06T08:19:21,872 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,872 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/762d70f596364456a055548dc87ecaa0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090] 2024-12-06T08:19:21,873 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 762d70f596364456a055548dc87ecaa0, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733473157179 2024-12-06T08:19:21,873 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c317c1afd5904d01a20abb682e05e26b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733473157240 2024-12-06T08:19:21,874 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e097be9998614a3988353e90f8c3c090, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733473158376 2024-12-06T08:19:21,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:21,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:21,876 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:21,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add 
compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:21,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:21,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:21,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:21,877 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:21,877 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:21,877 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,878 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/05f5a2a76d354276ad440b55f279ea6e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5ca61cbd84104b57b2df10b576e56c83, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/139fb4e990ca408c979d631a0e4714e2] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.1 K 2024-12-06T08:19:21,878 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 05f5a2a76d354276ad440b55f279ea6e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733473157179 2024-12-06T08:19:21,879 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ca61cbd84104b57b2df10b576e56c83, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733473157240 2024-12-06T08:19:21,879 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 139fb4e990ca408c979d631a0e4714e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733473158376 2024-12-06T08:19:21,884 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:21,890 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
03df86e7064722e5116b657f067426bf#B#compaction#169 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:21,890 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e834571521af477b8dc9b6319d6e0dd8 is 50, key is test_row_0/B:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:21,892 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412064f92539acfb74a1095ee95c428995224_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:21,894 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412064f92539acfb74a1095ee95c428995224_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:21,894 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412064f92539acfb74a1095ee95c428995224_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:21,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742027_1203 (size=12731) 2024-12-06T08:19:21,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742028_1204 (size=4469) 2024-12-06T08:19:21,939 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#A#compaction#168 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:21,940 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/24b042a9539048f9a839adb0a69dc7d6 is 175, key is test_row_0/A:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:21,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742029_1205 (size=31685) 2024-12-06T08:19:21,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:21,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-06T08:19:21,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:21,957 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T08:19:21,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:21,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:21,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:21,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:21,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:21,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:21,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206065db5e633e342ecb532f7ef19c29b1a_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473160522/Put/seqid=0 2024-12-06T08:19:21,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742030_1206 (size=12454) 2024-12-06T08:19:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=55 2024-12-06T08:19:22,332 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e834571521af477b8dc9b6319d6e0dd8 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e834571521af477b8dc9b6319d6e0dd8 2024-12-06T08:19:22,338 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into e834571521af477b8dc9b6319d6e0dd8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:22,338 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:22,338 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=13, startTime=1733473161875; duration=0sec 2024-12-06T08:19:22,339 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:22,339 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:22,339 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:22,340 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:22,340 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:22,340 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
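Note: the FlushTableProcedure with pid=55 and the recurring "Checking to see if procedure is done pid=55" entries are the master-side view of a client flush request; in this build the flush runs as a master procedure and the client RPC layer polls until it finishes. A minimal client-side sketch, assuming a reachable cluster through the default Configuration (table name taken from the log, everything else illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client side of the flush seen above: Admin#flush submits the
// flush request to the master, and the call returns once the procedure completes
// (the repeated "Checking to see if procedure is done pid=55" lines are that polling).
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // blocks until the flush finishes
    }
  }
}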
2024-12-06T08:19:22,340 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b1efcb73ba1544c28a3984ce3cd1f3f5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dbd1268b7d094e8bae956fd3ce7667bd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dd284e5f78904452bc7fdc89f65bba11] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.1 K 2024-12-06T08:19:22,341 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b1efcb73ba1544c28a3984ce3cd1f3f5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733473157179 2024-12-06T08:19:22,341 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dbd1268b7d094e8bae956fd3ce7667bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733473157240 2024-12-06T08:19:22,341 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dd284e5f78904452bc7fdc89f65bba11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733473158376 2024-12-06T08:19:22,352 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/24b042a9539048f9a839adb0a69dc7d6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/24b042a9539048f9a839adb0a69dc7d6 2024-12-06T08:19:22,355 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:22,356 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/3e2a82b0c9cd403aa1d361645854bb11 is 50, key is test_row_0/C:col10/1733473158376/Put/seqid=0 2024-12-06T08:19:22,357 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/A of 03df86e7064722e5116b657f067426bf into 24b042a9539048f9a839adb0a69dc7d6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
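Note: the mob.DefaultMobStoreCompactor / HMobStore entries and the repeated "bloomtype=ROW" fields indicate a table whose A family is MOB-enabled and whose families use ROW bloom filters. A hedged sketch of creating a similarly shaped table; the MOB threshold and other family settings below are assumptions for illustration, not values read from this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a table shaped like the one in this run: families A, B, C with ROW bloom
// filters ("bloomtype=ROW" above) and MOB enabled on A (hence DefaultMobStoreCompactor
// and HMobStore in the log). The MOB threshold is an assumed value.
public final class CreateTestTable {
  static void create(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setBloomFilterType(BloomType.ROW)
        .setMobEnabled(true)          // values above the threshold are written as MOB files
        .setMobThreshold(4 * 1024L)   // assumed threshold; not visible in the log
        .build());
    for (String cf : new String[] {"B", "C"}) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
          .setBloomFilterType(BloomType.ROW).build());
    }
    admin.createTable(table.build());
  }
}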
2024-12-06T08:19:22,358 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:22,358 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=13, startTime=1733473161870; duration=0sec 2024-12-06T08:19:22,358 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:22,358 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:22,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742031_1207 (size=12731) 2024-12-06T08:19:22,368 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/3e2a82b0c9cd403aa1d361645854bb11 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/3e2a82b0c9cd403aa1d361645854bb11 2024-12-06T08:19:22,379 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into 3e2a82b0c9cd403aa1d361645854bb11(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
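Note: the recurring RegionTooBusyException "Over memstore limit=512.0 K" warnings (above and further below) are raised when a region's memstore exceeds its blocking threshold, i.e. the configured flush size times the block multiplier. A minimal sketch of how such a small limit could arise; the specific values are assumptions chosen only to reproduce the 512 K figure, not taken from the test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Why puts keep failing with RegionTooBusyException "Over memstore limit=512.0 K":
// writes are rejected once the region's memstore exceeds
//   hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
// The values below are assumed; they merely reproduce the 512 K limit seen in the log.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // 128 K flush size (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
  }
}

The repeated warnings arriving from the same client connections with increasing callIds are consistent with the HBase client retrying these busy responses rather than surfacing them to the test immediately.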
2024-12-06T08:19:22,379 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:22,379 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=13, startTime=1733473161876; duration=0sec 2024-12-06T08:19:22,380 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:22,380 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:22,403 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206065db5e633e342ecb532f7ef19c29b1a_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206065db5e633e342ecb532f7ef19c29b1a_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:22,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/084d16ab2a7b4e1c84204c7652835024, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:22,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/084d16ab2a7b4e1c84204c7652835024 is 175, key is test_row_0/A:col10/1733473160522/Put/seqid=0 2024-12-06T08:19:22,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742032_1208 (size=31255) 2024-12-06T08:19:22,427 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/084d16ab2a7b4e1c84204c7652835024 2024-12-06T08:19:22,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/fe1955b8a2ce48bfbe735aeef19667db is 50, key is test_row_0/B:col10/1733473160522/Put/seqid=0 2024-12-06T08:19:22,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742033_1209 (size=12301) 2024-12-06T08:19:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T08:19:22,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:22,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473222677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473222677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473222678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473222679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473222780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473222780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473222782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473222782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,852 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/fe1955b8a2ce48bfbe735aeef19667db 2024-12-06T08:19:22,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/af345b5c97f44f8aa8a8afeefbb6d0a5 is 50, key is test_row_0/C:col10/1733473160522/Put/seqid=0 2024-12-06T08:19:22,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742034_1210 (size=12301) 2024-12-06T08:19:22,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473222982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473222983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473222983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:22,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:22,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473222984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,264 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/af345b5c97f44f8aa8a8afeefbb6d0a5 2024-12-06T08:19:23,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/084d16ab2a7b4e1c84204c7652835024 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024 2024-12-06T08:19:23,273 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024, entries=150, sequenceid=272, filesize=30.5 K 2024-12-06T08:19:23,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/fe1955b8a2ce48bfbe735aeef19667db as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/fe1955b8a2ce48bfbe735aeef19667db 2024-12-06T08:19:23,278 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/fe1955b8a2ce48bfbe735aeef19667db, entries=150, sequenceid=272, filesize=12.0 K 2024-12-06T08:19:23,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/af345b5c97f44f8aa8a8afeefbb6d0a5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/af345b5c97f44f8aa8a8afeefbb6d0a5 2024-12-06T08:19:23,284 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/af345b5c97f44f8aa8a8afeefbb6d0a5, entries=150, sequenceid=272, filesize=12.0 K 2024-12-06T08:19:23,285 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 03df86e7064722e5116b657f067426bf in 1328ms, sequenceid=272, compaction requested=false 2024-12-06T08:19:23,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:23,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:23,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-06T08:19:23,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-06T08:19:23,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:23,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-06T08:19:23,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:19:23,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7920 sec 2024-12-06T08:19:23,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:23,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:23,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:23,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:23,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:23,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:23,291 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.7990 sec 2024-12-06T08:19:23,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206206292d7e565498c9927d5073fcadf2e_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:23,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473223308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473223310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473223311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473223311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742035_1211 (size=14994) 2024-12-06T08:19:23,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473223411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473223412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473223414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473223414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-06T08:19:23,598 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-06T08:19:23,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-06T08:19:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T08:19:23,601 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:23,601 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:23,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:23,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473223614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473223614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473223617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473223617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T08:19:23,737 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:23,742 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206206292d7e565498c9927d5073fcadf2e_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206206292d7e565498c9927d5073fcadf2e_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:23,743 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/b05d65b6dca648c480c181c04c176ee6, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:23,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/b05d65b6dca648c480c181c04c176ee6 is 175, key is test_row_0/A:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:23,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742036_1212 (size=39949) 2024-12-06T08:19:23,752 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/b05d65b6dca648c480c181c04c176ee6 2024-12-06T08:19:23,758 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T08:19:23,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:23,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:23,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:23,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:23,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:23,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:23,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/5874d5f386804bd89fdf37db7472af57 is 50, key is test_row_0/B:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742037_1213 (size=12301) 2024-12-06T08:19:23,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T08:19:23,911 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T08:19:23,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:23,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:23,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:23,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:23,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:23,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473223915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473223918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473223921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:23,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:23,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473223921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,064 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T08:19:24,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:24,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:24,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:24,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:24,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/5874d5f386804bd89fdf37db7472af57 2024-12-06T08:19:24,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/8795e7924fb84f61a92c951001297231 is 50, key is test_row_0/C:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:24,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T08:19:24,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742038_1214 (size=12301) 2024-12-06T08:19:24,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/8795e7924fb84f61a92c951001297231 2024-12-06T08:19:24,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/b05d65b6dca648c480c181c04c176ee6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6 2024-12-06T08:19:24,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T08:19:24,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
as already flushing 2024-12-06T08:19:24,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:24,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:24,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6, entries=200, sequenceid=290, filesize=39.0 K 2024-12-06T08:19:24,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/5874d5f386804bd89fdf37db7472af57 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5874d5f386804bd89fdf37db7472af57 2024-12-06T08:19:24,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5874d5f386804bd89fdf37db7472af57, entries=150, sequenceid=290, filesize=12.0 K 2024-12-06T08:19:24,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/8795e7924fb84f61a92c951001297231 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/8795e7924fb84f61a92c951001297231 2024-12-06T08:19:24,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/8795e7924fb84f61a92c951001297231, entries=150, sequenceid=290, filesize=12.0 K 2024-12-06T08:19:24,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 03df86e7064722e5116b657f067426bf in 949ms, sequenceid=290, compaction requested=true 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:24,237 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:24,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T08:19:24,237 DEBUG 
[RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:24,239 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:24,239 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:24,239 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,239 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/24b042a9539048f9a839adb0a69dc7d6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=100.5 K 2024-12-06T08:19:24,239 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,240 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:24,239 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/24b042a9539048f9a839adb0a69dc7d6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6] 2024-12-06T08:19:24,240 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:24,240 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,240 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e834571521af477b8dc9b6319d6e0dd8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/fe1955b8a2ce48bfbe735aeef19667db, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5874d5f386804bd89fdf37db7472af57] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.5 K 2024-12-06T08:19:24,240 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24b042a9539048f9a839adb0a69dc7d6, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733473158376 2024-12-06T08:19:24,241 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e834571521af477b8dc9b6319d6e0dd8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733473158376 2024-12-06T08:19:24,241 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 084d16ab2a7b4e1c84204c7652835024, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733473160517 2024-12-06T08:19:24,242 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fe1955b8a2ce48bfbe735aeef19667db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733473160517 2024-12-06T08:19:24,242 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b05d65b6dca648c480c181c04c176ee6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473162675 2024-12-06T08:19:24,242 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5874d5f386804bd89fdf37db7472af57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473162675 2024-12-06T08:19:24,269 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact 
MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:24,272 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:24,273 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/2318f013efcf463a92b00300ddf7aa65 is 50, key is test_row_0/B:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:24,290 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206e172b66df6234b9a80805dd411079ae6_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:24,293 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206e172b66df6234b9a80805dd411079ae6_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:24,293 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e172b66df6234b9a80805dd411079ae6_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:24,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742039_1215 (size=12983) 2024-12-06T08:19:24,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742040_1216 (size=4469) 2024-12-06T08:19:24,369 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:24,370 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:24,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:24,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206192700510ecb4c17807b33feac50b085_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473163307/Put/seqid=0 2024-12-06T08:19:24,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742041_1217 (size=12454) 2024-12-06T08:19:24,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:24,387 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206192700510ecb4c17807b33feac50b085_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206192700510ecb4c17807b33feac50b085_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:24,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/deeb2cafaa8b421e85b192ee0996ce94, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:24,390 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/deeb2cafaa8b421e85b192ee0996ce94 is 175, key is test_row_0/A:col10/1733473163307/Put/seqid=0 2024-12-06T08:19:24,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742042_1218 (size=31255) 2024-12-06T08:19:24,418 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/deeb2cafaa8b421e85b192ee0996ce94 2024-12-06T08:19:24,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/4c6ecba3bbbf43b488605324cf5a1bdc is 50, key is test_row_0/B:col10/1733473163307/Put/seqid=0 2024-12-06T08:19:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:24,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:24,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742043_1219 (size=12301) 2024-12-06T08:19:24,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473224441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473224446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473224446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473224446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473224547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473224551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473224552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473224552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T08:19:24,709 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/2318f013efcf463a92b00300ddf7aa65 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/2318f013efcf463a92b00300ddf7aa65 2024-12-06T08:19:24,714 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into 2318f013efcf463a92b00300ddf7aa65(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:24,714 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:24,714 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=13, startTime=1733473164237; duration=0sec 2024-12-06T08:19:24,714 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:24,714 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:24,714 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:24,716 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:24,716 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:24,716 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:24,716 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/3e2a82b0c9cd403aa1d361645854bb11, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/af345b5c97f44f8aa8a8afeefbb6d0a5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/8795e7924fb84f61a92c951001297231] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.5 K 2024-12-06T08:19:24,716 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e2a82b0c9cd403aa1d361645854bb11, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733473158376 2024-12-06T08:19:24,717 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting af345b5c97f44f8aa8a8afeefbb6d0a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733473160517 2024-12-06T08:19:24,717 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 8795e7924fb84f61a92c951001297231, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473162675 2024-12-06T08:19:24,723 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
03df86e7064722e5116b657f067426bf#A#compaction#177 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:24,723 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/17dc4ab360e5414a9fbf4bbf9597b667 is 175, key is test_row_0/A:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:24,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742044_1220 (size=31937) 2024-12-06T08:19:24,729 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#181 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:24,730 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/bce1fc1fe7fc4919a2032307804b2d97 is 50, key is test_row_0/C:col10/1733473162675/Put/seqid=0 2024-12-06T08:19:24,741 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/17dc4ab360e5414a9fbf4bbf9597b667 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/17dc4ab360e5414a9fbf4bbf9597b667 2024-12-06T08:19:24,747 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/A of 03df86e7064722e5116b657f067426bf into 17dc4ab360e5414a9fbf4bbf9597b667(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:24,747 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:24,747 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=13, startTime=1733473164237; duration=0sec 2024-12-06T08:19:24,747 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:24,747 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:24,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742045_1221 (size=12983) 2024-12-06T08:19:24,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473224752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473224752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473224753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473224755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:24,833 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/4c6ecba3bbbf43b488605324cf5a1bdc 2024-12-06T08:19:24,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/25eaf78fe35441869c04d116677cd6b1 is 50, key is test_row_0/C:col10/1733473163307/Put/seqid=0 2024-12-06T08:19:24,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742046_1222 (size=12301) 2024-12-06T08:19:25,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473225055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473225055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473225057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473225060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,157 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/bce1fc1fe7fc4919a2032307804b2d97 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/bce1fc1fe7fc4919a2032307804b2d97 2024-12-06T08:19:25,162 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into bce1fc1fe7fc4919a2032307804b2d97(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:25,162 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:25,162 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=13, startTime=1733473164237; duration=0sec 2024-12-06T08:19:25,162 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:25,162 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:25,247 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/25eaf78fe35441869c04d116677cd6b1 2024-12-06T08:19:25,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/deeb2cafaa8b421e85b192ee0996ce94 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94 2024-12-06T08:19:25,257 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94, entries=150, sequenceid=311, filesize=30.5 K 2024-12-06T08:19:25,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/4c6ecba3bbbf43b488605324cf5a1bdc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/4c6ecba3bbbf43b488605324cf5a1bdc 2024-12-06T08:19:25,263 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/4c6ecba3bbbf43b488605324cf5a1bdc, entries=150, sequenceid=311, filesize=12.0 K 2024-12-06T08:19:25,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/25eaf78fe35441869c04d116677cd6b1 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/25eaf78fe35441869c04d116677cd6b1 2024-12-06T08:19:25,269 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/25eaf78fe35441869c04d116677cd6b1, entries=150, sequenceid=311, filesize=12.0 K 2024-12-06T08:19:25,270 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 03df86e7064722e5116b657f067426bf in 900ms, sequenceid=311, compaction requested=false 2024-12-06T08:19:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-06T08:19:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-06T08:19:25,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-06T08:19:25,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6700 sec 2024-12-06T08:19:25,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.6740 sec 2024-12-06T08:19:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:25,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-06T08:19:25,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:25,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:25,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:25,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:25,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:25,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:25,407 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063ecf51427fdd4a64943901e5d88f33be_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:25,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742047_1223 (size=12454) 2024-12-06T08:19:25,416 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:25,420 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063ecf51427fdd4a64943901e5d88f33be_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063ecf51427fdd4a64943901e5d88f33be_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:25,422 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c5607c3c35604da1827db024c4a7cce2, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:25,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c5607c3c35604da1827db024c4a7cce2 is 175, key is test_row_0/A:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742048_1224 (size=31255) 2024-12-06T08:19:25,430 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c5607c3c35604da1827db024c4a7cce2 2024-12-06T08:19:25,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/920c567b6bfb402b8bf5ca0b4390c80b is 50, key is test_row_0/B:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:25,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473225444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742049_1225 (size=12301) 2024-12-06T08:19:25,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/920c567b6bfb402b8bf5ca0b4390c80b 2024-12-06T08:19:25,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/22cb935da648451697882fb622e7f5c6 is 50, key is test_row_0/C:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:25,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742050_1226 (size=12301) 2024-12-06T08:19:25,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/22cb935da648451697882fb622e7f5c6 2024-12-06T08:19:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/c5607c3c35604da1827db024c4a7cce2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2 2024-12-06T08:19:25,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2, 
entries=150, sequenceid=331, filesize=30.5 K 2024-12-06T08:19:25,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/920c567b6bfb402b8bf5ca0b4390c80b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/920c567b6bfb402b8bf5ca0b4390c80b 2024-12-06T08:19:25,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/920c567b6bfb402b8bf5ca0b4390c80b, entries=150, sequenceid=331, filesize=12.0 K 2024-12-06T08:19:25,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/22cb935da648451697882fb622e7f5c6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/22cb935da648451697882fb622e7f5c6 2024-12-06T08:19:25,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/22cb935da648451697882fb622e7f5c6, entries=150, sequenceid=331, filesize=12.0 K 2024-12-06T08:19:25,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 03df86e7064722e5116b657f067426bf in 113ms, sequenceid=331, compaction requested=true 2024-12-06T08:19:25,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:25,511 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:25,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:25,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:25,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:25,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:25,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 03df86e7064722e5116b657f067426bf:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:25,512 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:25,512 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:25,512 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:25,513 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/A is initiating minor compaction (all files) 2024-12-06T08:19:25,513 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/A in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:25,513 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/17dc4ab360e5414a9fbf4bbf9597b667, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=92.2 K 2024-12-06T08:19:25,513 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:25,513 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/17dc4ab360e5414a9fbf4bbf9597b667, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2] 2024-12-06T08:19:25,514 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:25,514 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/B is initiating minor compaction (all files) 2024-12-06T08:19:25,514 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/B in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:25,514 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/2318f013efcf463a92b00300ddf7aa65, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/4c6ecba3bbbf43b488605324cf5a1bdc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/920c567b6bfb402b8bf5ca0b4390c80b] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.7 K 2024-12-06T08:19:25,514 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17dc4ab360e5414a9fbf4bbf9597b667, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473162675 2024-12-06T08:19:25,515 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2318f013efcf463a92b00300ddf7aa65, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473162675 2024-12-06T08:19:25,515 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting deeb2cafaa8b421e85b192ee0996ce94, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733473163307 2024-12-06T08:19:25,515 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c6ecba3bbbf43b488605324cf5a1bdc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733473163307 2024-12-06T08:19:25,515 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5607c3c35604da1827db024c4a7cce2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733473164443 2024-12-06T08:19:25,516 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 
920c567b6bfb402b8bf5ca0b4390c80b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733473164443 2024-12-06T08:19:25,541 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:25,542 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#B#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:25,542 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/89a252ca05444af0b768daa099b7d65e is 50, key is test_row_0/B:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:25,544 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206b6b5ffc432ca440a997530906764ebd4_03df86e7064722e5116b657f067426bf store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:25,545 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206b6b5ffc432ca440a997530906764ebd4_03df86e7064722e5116b657f067426bf, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:25,546 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b6b5ffc432ca440a997530906764ebd4_03df86e7064722e5116b657f067426bf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:25,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T08:19:25,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:25,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:25,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:25,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:25,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:25,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:25,575 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473225569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473225573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473225574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473225575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473225575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742052_1228 (size=4469) 2024-12-06T08:19:25,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d8470dfe4a6c4c26a1a0f0840cb771ec_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473165547/Put/seqid=0 2024-12-06T08:19:25,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742051_1227 (size=13085) 2024-12-06T08:19:25,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742053_1229 (size=14994) 2024-12-06T08:19:25,629 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:25,633 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d8470dfe4a6c4c26a1a0f0840cb771ec_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d8470dfe4a6c4c26a1a0f0840cb771ec_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:25,634 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/384415b972f14e0ba28ed0cb65eeb9d1, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:25,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/384415b972f14e0ba28ed0cb65eeb9d1 is 175, key is test_row_0/A:col10/1733473165547/Put/seqid=0 2024-12-06T08:19:25,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742054_1230 
(size=39949) 2024-12-06T08:19:25,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473225676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473225676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473225679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473225682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473225683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-06T08:19:25,704 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-06T08:19:25,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-06T08:19:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T08:19:25,707 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:25,707 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:25,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T08:19:25,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T08:19:25,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
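The flush of TestAcidGuarantees recorded just above (FlushTableProcedure pid=59, following the completed procId=57) is driven by the test client through the HBase Admin API. A minimal sketch of issuing the same table flush, assuming a standard HBase 2.x client and a cluster configuration on the classpath; class and variable names here are illustrative and not taken from the test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath to locate the cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; the master runs it as a
      // FlushTableProcedure (pid=59 above) with one FlushRegionProcedure per
      // region (pid=60 above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
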
2024-12-06T08:19:25,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:25,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:25,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:25,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
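The recurring RegionTooBusyException entries throughout this section come from HRegion.checkResources, which rejects writes while the region's memstore is above its blocking limit (512.0 K here) until the in-progress flush drains it; that limit is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the small value suggests the test runs with a deliberately low flush size. A minimal, hypothetical sketch of an application-level writer that backs off and retries when such a failure reaches the caller (the HBase client also retries this exception internally; names below are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Typically a RegionTooBusyException (possibly wrapped by the client's
          // retry machinery) while the memstore is above its blocking limit.
          if (attempt == 5) {
            throw e; // give up after the last attempt
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2; // simple exponential backoff while the flush catches up
        }
      }
    }
  }
}
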
2024-12-06T08:19:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:25,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473225880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473225880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473225882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473225887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473225887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:25,987 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#A#compaction#187 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:25,988 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/b0901895c90843b08abe7ad2103d22de is 175, key is test_row_0/A:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:25,999 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/89a252ca05444af0b768daa099b7d65e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/89a252ca05444af0b768daa099b7d65e 2024-12-06T08:19:26,005 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/B of 03df86e7064722e5116b657f067426bf into 89a252ca05444af0b768daa099b7d65e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:26,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:26,005 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/B, priority=13, startTime=1733473165512; duration=0sec 2024-12-06T08:19:26,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:26,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:B 2024-12-06T08:19:26,005 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:26,006 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:26,007 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 03df86e7064722e5116b657f067426bf/C is initiating minor compaction (all files) 2024-12-06T08:19:26,007 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 03df86e7064722e5116b657f067426bf/C in TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:26,007 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/bce1fc1fe7fc4919a2032307804b2d97, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/25eaf78fe35441869c04d116677cd6b1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/22cb935da648451697882fb622e7f5c6] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp, totalSize=36.7 K 2024-12-06T08:19:26,007 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting bce1fc1fe7fc4919a2032307804b2d97, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473162675 2024-12-06T08:19:26,008 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 25eaf78fe35441869c04d116677cd6b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733473163307 2024-12-06T08:19:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T08:19:26,009 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 22cb935da648451697882fb622e7f5c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=331, earliestPutTs=1733473164443 2024-12-06T08:19:26,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T08:19:26,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:26,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:26,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:26,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:26,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:26,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742055_1231 (size=32039) 2024-12-06T08:19:26,024 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/b0901895c90843b08abe7ad2103d22de as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b0901895c90843b08abe7ad2103d22de 2024-12-06T08:19:26,031 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/A of 03df86e7064722e5116b657f067426bf into b0901895c90843b08abe7ad2103d22de(size=31.3 K), total size for store is 31.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:26,031 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:26,031 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/A, priority=13, startTime=1733473165511; duration=0sec 2024-12-06T08:19:26,031 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:26,031 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:A 2024-12-06T08:19:26,040 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=351, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/384415b972f14e0ba28ed0cb65eeb9d1 2024-12-06T08:19:26,046 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03df86e7064722e5116b657f067426bf#C#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:26,047 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/a59fa5e0d18842e1989dd834ac321197 is 50, key is test_row_0/C:col10/1733473164444/Put/seqid=0 2024-12-06T08:19:26,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f6822bef9f13437d8135b163d1338767 is 50, key is test_row_0/B:col10/1733473165547/Put/seqid=0 2024-12-06T08:19:26,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742057_1233 (size=12301) 2024-12-06T08:19:26,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f6822bef9f13437d8135b163d1338767 2024-12-06T08:19:26,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742056_1232 (size=13085) 2024-12-06T08:19:26,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/c215682c0ccb491cae2977846ae19ab2 is 50, key is test_row_0/C:col10/1733473165547/Put/seqid=0 2024-12-06T08:19:26,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40801 is added to blk_1073742058_1234 (size=12301) 2024-12-06T08:19:26,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/c215682c0ccb491cae2977846ae19ab2 2024-12-06T08:19:26,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/384415b972f14e0ba28ed0cb65eeb9d1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/384415b972f14e0ba28ed0cb65eeb9d1 2024-12-06T08:19:26,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/384415b972f14e0ba28ed0cb65eeb9d1, entries=200, sequenceid=351, filesize=39.0 K 2024-12-06T08:19:26,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/f6822bef9f13437d8135b163d1338767 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f6822bef9f13437d8135b163d1338767 2024-12-06T08:19:26,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f6822bef9f13437d8135b163d1338767, entries=150, sequenceid=351, filesize=12.0 K 2024-12-06T08:19:26,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/c215682c0ccb491cae2977846ae19ab2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/c215682c0ccb491cae2977846ae19ab2 2024-12-06T08:19:26,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/c215682c0ccb491cae2977846ae19ab2, entries=150, sequenceid=351, filesize=12.0 K 2024-12-06T08:19:26,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 03df86e7064722e5116b657f067426bf in 611ms, sequenceid=351, compaction requested=false 2024-12-06T08:19:26,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:26,163 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-06T08:19:26,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:26,164 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T08:19:26,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:26,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:26,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:26,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:26,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:26,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:26,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206da5e2f3cd8a943b5a9cc927345eb66d5_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473165573/Put/seqid=0 2024-12-06T08:19:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:26,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. as already flushing 2024-12-06T08:19:26,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742059_1235 (size=12454) 2024-12-06T08:19:26,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473226223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473226223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473226224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473226225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473226227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T08:19:26,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473226329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473226329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473226329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473226329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473226330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,496 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/a59fa5e0d18842e1989dd834ac321197 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a59fa5e0d18842e1989dd834ac321197 2024-12-06T08:19:26,502 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 03df86e7064722e5116b657f067426bf/C of 03df86e7064722e5116b657f067426bf into a59fa5e0d18842e1989dd834ac321197(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:26,502 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:26,502 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf., storeName=03df86e7064722e5116b657f067426bf/C, priority=13, startTime=1733473165512; duration=0sec 2024-12-06T08:19:26,502 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:26,502 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03df86e7064722e5116b657f067426bf:C 2024-12-06T08:19:26,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473226532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473226533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473226533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473226534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473226534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:26,605 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206da5e2f3cd8a943b5a9cc927345eb66d5_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206da5e2f3cd8a943b5a9cc927345eb66d5_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:26,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/6eedf5ae2d9d41948a27fa409c25a3e0, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:26,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/6eedf5ae2d9d41948a27fa409c25a3e0 is 175, key is test_row_0/A:col10/1733473165573/Put/seqid=0 2024-12-06T08:19:26,611 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742060_1236 (size=31255) 2024-12-06T08:19:26,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T08:19:26,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473226835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473226835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473226836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473226837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:26,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473226839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,012 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/6eedf5ae2d9d41948a27fa409c25a3e0 2024-12-06T08:19:27,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e39a948124a84a6382c082efbe6ebbd6 is 50, key is test_row_0/B:col10/1733473165573/Put/seqid=0 2024-12-06T08:19:27,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742061_1237 (size=12301) 2024-12-06T08:19:27,034 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e39a948124a84a6382c082efbe6ebbd6 2024-12-06T08:19:27,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/35a305277ae74dbb8786b4ba717c4b5d is 50, key is test_row_0/C:col10/1733473165573/Put/seqid=0 2024-12-06T08:19:27,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742062_1238 (size=12301) 2024-12-06T08:19:27,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:27,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49874 deadline: 1733473227339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49942 deadline: 1733473227339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49928 deadline: 1733473227340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49896 deadline: 1733473227342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49868 deadline: 1733473227343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,450 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/35a305277ae74dbb8786b4ba717c4b5d 2024-12-06T08:19:27,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/6eedf5ae2d9d41948a27fa409c25a3e0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/6eedf5ae2d9d41948a27fa409c25a3e0 2024-12-06T08:19:27,460 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/6eedf5ae2d9d41948a27fa409c25a3e0, entries=150, sequenceid=369, filesize=30.5 K 2024-12-06T08:19:27,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/e39a948124a84a6382c082efbe6ebbd6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e39a948124a84a6382c082efbe6ebbd6 2024-12-06T08:19:27,466 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e39a948124a84a6382c082efbe6ebbd6, entries=150, sequenceid=369, filesize=12.0 K 2024-12-06T08:19:27,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/35a305277ae74dbb8786b4ba717c4b5d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/35a305277ae74dbb8786b4ba717c4b5d 2024-12-06T08:19:27,474 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/35a305277ae74dbb8786b4ba717c4b5d, entries=150, sequenceid=369, filesize=12.0 K 2024-12-06T08:19:27,475 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 03df86e7064722e5116b657f067426bf in 1311ms, sequenceid=369, compaction requested=true 2024-12-06T08:19:27,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:27,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:27,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-06T08:19:27,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-06T08:19:27,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-06T08:19:27,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7690 sec 2024-12-06T08:19:27,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.7750 sec 2024-12-06T08:19:27,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-06T08:19:27,811 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-06T08:19:27,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:27,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-06T08:19:27,814 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:27,815 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T08:19:27,815 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:27,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:27,854 DEBUG [Thread-673 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:65195 2024-12-06T08:19:27,854 DEBUG [Thread-673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:27,855 DEBUG [Thread-671 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:65195 2024-12-06T08:19:27,855 DEBUG [Thread-671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:27,859 DEBUG [Thread-675 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:65195 2024-12-06T08:19:27,860 DEBUG [Thread-675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:27,860 DEBUG [Thread-677 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:65195 2024-12-06T08:19:27,860 DEBUG [Thread-677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T08:19:27,967 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:27,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-06T08:19:27,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
2024-12-06T08:19:27,968 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T08:19:27,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:27,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:27,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:27,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:27,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:27,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060e4303886ee4410f8ae66064a6a05c22_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473166223/Put/seqid=0 2024-12-06T08:19:27,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742063_1239 (size=12454) 2024-12-06T08:19:28,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T08:19:28,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:28,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
as already flushing 2024-12-06T08:19:28,347 DEBUG [Thread-664 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b55744e to 127.0.0.1:65195 2024-12-06T08:19:28,347 DEBUG [Thread-662 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79982672 to 127.0.0.1:65195 2024-12-06T08:19:28,347 DEBUG [Thread-664 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:28,347 DEBUG [Thread-662 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:28,349 DEBUG [Thread-668 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x505d5ccd to 127.0.0.1:65195 2024-12-06T08:19:28,349 DEBUG [Thread-668 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:28,350 DEBUG [Thread-666 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x454f1431 to 127.0.0.1:65195 2024-12-06T08:19:28,350 DEBUG [Thread-666 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:28,355 DEBUG [Thread-660 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bcbdbdb to 127.0.0.1:65195 2024-12-06T08:19:28,355 DEBUG [Thread-660 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:28,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:28,383 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060e4303886ee4410f8ae66064a6a05c22_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060e4303886ee4410f8ae66064a6a05c22_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:28,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/fe69fa7c5a544377b9f43b57f1ffe846, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:28,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/fe69fa7c5a544377b9f43b57f1ffe846 is 175, key is test_row_0/A:col10/1733473166223/Put/seqid=0 2024-12-06T08:19:28,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742064_1240 (size=31255) 2024-12-06T08:19:28,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T08:19:28,789 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=390, memsize=38.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/fe69fa7c5a544377b9f43b57f1ffe846 2024-12-06T08:19:28,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/d8b1b4201e6d47c58964b8675675d9a7 is 50, key is test_row_0/B:col10/1733473166223/Put/seqid=0 2024-12-06T08:19:28,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742065_1241 (size=12301) 2024-12-06T08:19:28,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T08:19:29,199 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/d8b1b4201e6d47c58964b8675675d9a7 2024-12-06T08:19:29,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/9c189af82d0b43209c2ce0954dedd549 is 50, key is test_row_0/C:col10/1733473166223/Put/seqid=0 2024-12-06T08:19:29,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742066_1242 (size=12301) 2024-12-06T08:19:29,611 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/9c189af82d0b43209c2ce0954dedd549 2024-12-06T08:19:29,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/fe69fa7c5a544377b9f43b57f1ffe846 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/fe69fa7c5a544377b9f43b57f1ffe846 2024-12-06T08:19:29,618 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/fe69fa7c5a544377b9f43b57f1ffe846, entries=150, sequenceid=390, filesize=30.5 K 2024-12-06T08:19:29,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/d8b1b4201e6d47c58964b8675675d9a7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/d8b1b4201e6d47c58964b8675675d9a7 2024-12-06T08:19:29,623 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/d8b1b4201e6d47c58964b8675675d9a7, entries=150, sequenceid=390, filesize=12.0 K 2024-12-06T08:19:29,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/9c189af82d0b43209c2ce0954dedd549 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/9c189af82d0b43209c2ce0954dedd549 2024-12-06T08:19:29,627 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/9c189af82d0b43209c2ce0954dedd549, entries=150, sequenceid=390, filesize=12.0 K 2024-12-06T08:19:29,628 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=33.54 KB/34350 for 03df86e7064722e5116b657f067426bf in 1660ms, sequenceid=390, compaction requested=true 2024-12-06T08:19:29,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:29,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
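Note on the flush above: it walks the full write path for region 03df86e7064722e5116b657f067426bf. Each of the three column families is snapshotted out of its CompactingMemStore, written to a .tmp HFile (family A additionally producing a MOB file via DefaultMobStoreFlusher), and then committed into the store directory, finishing at sequenceid=390. The flush itself was requested from the client (procId 61, completed further down). A minimal sketch of that client call follows; the configuration values such as the ZooKeeper quorum are placeholders and are not taken from this log.

    // Minimal sketch: requesting a table flush through the client Admin API.
    // Quorum and other settings below are placeholders, not values from the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the flush completes; in the log above this shows up as
          // FlushTableProcedure pid=61 with a per-region FlushRegionProcedure
          // child (pid=62) executed on the region server.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }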
2024-12-06T08:19:29,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-06T08:19:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-06T08:19:29,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-06T08:19:29,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8140 sec 2024-12-06T08:19:29,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.8180 sec 2024-12-06T08:19:29,737 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:19:29,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-06T08:19:29,919 INFO [Thread-670 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-12-06T08:19:29,919 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5512 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5650 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2349 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7047 rows 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2363 2024-12-06T08:19:29,920 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7089 rows 2024-12-06T08:19:29,920 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:19:29,920 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5caaf139 to 127.0.0.1:65195 2024-12-06T08:19:29,920 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:29,922 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T08:19:29,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 
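Note on the summary above: these are the AcidGuaranteesTestTool counters, reported just before the test tears the table down, for how many rows each writer wrote and how many rows the readers and scanners read and verified. The "verified ... rows" figures correspond to an atomicity check on every row that was read. A rough, illustrative version of that check is sketched below; this is not the tool's actual code, and it assumes the tool's usual design of writing one value to every column of a row in a single Put, so a correct read must never see mixed values within a row.

    // Illustrative sketch (not the tool's actual code): verify that all cells
    // returned for one row carry the same value, i.e. no torn read.
    import java.util.Arrays;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Result;

    final class RowAtomicityCheck {
      static void verify(Result result) {
        byte[] expected = null;
        for (Cell cell : result.rawCells()) {
          byte[] value = CellUtil.cloneValue(cell);
          if (expected == null) {
            expected = value;
          } else if (!Arrays.equals(expected, value)) {
            throw new AssertionError("Row " + Arrays.toString(result.getRow())
                + " returned mixed values across columns");
          }
        }
      }
    }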
2024-12-06T08:19:29,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T08:19:29,926 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473169926"}]},"ts":"1733473169926"} 2024-12-06T08:19:29,927 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T08:19:29,930 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T08:19:29,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:19:29,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, UNASSIGN}] 2024-12-06T08:19:29,933 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, UNASSIGN 2024-12-06T08:19:29,933 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:29,934 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:19:29,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:19:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T08:19:30,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:30,086 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:30,086 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:19:30,086 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 03df86e7064722e5116b657f067426bf, disabling compactions & flushes 2024-12-06T08:19:30,086 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 
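Note on the disable above: it unfolds as a chain of master procedures. DisableTableProcedure (pid=63) schedules CloseTableRegionsProcedure (pid=64), which unassigns the region via TransitRegionStateProcedure (pid=65) and dispatches CloseRegionProcedure (pid=66) to the region server; the close then flushes the remaining memstore (~33.54 KB) and, further down, moves the compacted store files into the archive directory via HFileArchiver. On the client side this whole sequence is a single Admin call, sketched below under the same connection assumptions as the earlier flush example; the repeated "Checking to see if procedure is done pid=63" lines are the client polling that completion.

    // Minimal sketch of the client side of the disable sequence above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class DisableTableExample {
      static void disable(Admin admin) throws java.io.IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        // Blocks until the master reports DisableTableProcedure (pid=63) done.
        admin.disableTable(tn);
        assert admin.isTableDisabled(tn);
      }
    }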
2024-12-06T08:19:30,086 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:30,086 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. after waiting 0 ms 2024-12-06T08:19:30,086 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:30,086 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 03df86e7064722e5116b657f067426bf 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T08:19:30,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=A 2024-12-06T08:19:30,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:30,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=B 2024-12-06T08:19:30,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:30,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 03df86e7064722e5116b657f067426bf, store=C 2024-12-06T08:19:30,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:30,092 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a0cae04d2d6e442c85c6238991105ba1_03df86e7064722e5116b657f067426bf is 50, key is test_row_0/A:col10/1733473168346/Put/seqid=0 2024-12-06T08:19:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742067_1243 (size=9914) 2024-12-06T08:19:30,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T08:19:30,497 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:30,500 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a0cae04d2d6e442c85c6238991105ba1_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a0cae04d2d6e442c85c6238991105ba1_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:30,501 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6ec0a873a4448728c5a0f8e532330c7, store: [table=TestAcidGuarantees family=A region=03df86e7064722e5116b657f067426bf] 2024-12-06T08:19:30,502 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6ec0a873a4448728c5a0f8e532330c7 is 175, key is test_row_0/A:col10/1733473168346/Put/seqid=0 2024-12-06T08:19:30,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742068_1244 (size=22561) 2024-12-06T08:19:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T08:19:30,906 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=398, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6ec0a873a4448728c5a0f8e532330c7 2024-12-06T08:19:30,912 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/3005f2bbfd2c40b9b193db0bb42359da is 50, key is test_row_0/B:col10/1733473168346/Put/seqid=0 2024-12-06T08:19:30,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742069_1245 (size=9857) 2024-12-06T08:19:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T08:19:31,317 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/3005f2bbfd2c40b9b193db0bb42359da 2024-12-06T08:19:31,323 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/13704beb3a5c4058a0b2d4aebbe8f6a7 is 50, key is test_row_0/C:col10/1733473168346/Put/seqid=0 2024-12-06T08:19:31,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742070_1246 (size=9857) 2024-12-06T08:19:31,727 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/13704beb3a5c4058a0b2d4aebbe8f6a7 2024-12-06T08:19:31,736 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/A/a6ec0a873a4448728c5a0f8e532330c7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6ec0a873a4448728c5a0f8e532330c7 2024-12-06T08:19:31,740 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6ec0a873a4448728c5a0f8e532330c7, entries=100, sequenceid=398, filesize=22.0 K 2024-12-06T08:19:31,741 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/B/3005f2bbfd2c40b9b193db0bb42359da as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/3005f2bbfd2c40b9b193db0bb42359da 2024-12-06T08:19:31,744 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/3005f2bbfd2c40b9b193db0bb42359da, entries=100, sequenceid=398, filesize=9.6 K 2024-12-06T08:19:31,745 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/.tmp/C/13704beb3a5c4058a0b2d4aebbe8f6a7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/13704beb3a5c4058a0b2d4aebbe8f6a7 2024-12-06T08:19:31,748 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/13704beb3a5c4058a0b2d4aebbe8f6a7, entries=100, sequenceid=398, filesize=9.6 K 2024-12-06T08:19:31,748 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 03df86e7064722e5116b657f067426bf in 1662ms, sequenceid=398, compaction requested=true 2024-12-06T08:19:31,749 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6aa73babdca4a5e99a69e8193c9762e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/28815eb73bbc4ca6915eec6d0fdf65b6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/910e37d4c48842dfb257f4963268db2b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/762d70f596364456a055548dc87ecaa0, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/24b042a9539048f9a839adb0a69dc7d6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/17dc4ab360e5414a9fbf4bbf9597b667, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2] to archive 2024-12-06T08:19:31,750 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:19:31,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/95acaa833da74d448abe1073c0700a52 2024-12-06T08:19:31,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ce22768a1baf456d914ca2ad2c9fe42e 2024-12-06T08:19:31,754 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/20978e56318542308850ac5500d5168e 2024-12-06T08:19:31,755 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6aa73babdca4a5e99a69e8193c9762e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6aa73babdca4a5e99a69e8193c9762e 2024-12-06T08:19:31,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c790489617284ac797f75f042f9b6404 2024-12-06T08:19:31,757 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/4dbf173dc850400b974418e20638795c 2024-12-06T08:19:31,758 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/7633bb991e034f1cb86b1f6f23abb21b 2024-12-06T08:19:31,759 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/93d6762f85b44f32913b32e20c97ef17 2024-12-06T08:19:31,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/28815eb73bbc4ca6915eec6d0fdf65b6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/28815eb73bbc4ca6915eec6d0fdf65b6 2024-12-06T08:19:31,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/943b8887e64a4f358a6e5a0e221cc1d7 2024-12-06T08:19:31,762 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/0868e4ae11c049038b6cf660a03ba94f 2024-12-06T08:19:31,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/910e37d4c48842dfb257f4963268db2b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/910e37d4c48842dfb257f4963268db2b 2024-12-06T08:19:31,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/ac0b6fa898d34c099d56ae1bd75880c2 2024-12-06T08:19:31,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6c10d1a2f3c4aed99d3c0f877a250ee 2024-12-06T08:19:31,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/762d70f596364456a055548dc87ecaa0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/762d70f596364456a055548dc87ecaa0 2024-12-06T08:19:31,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c317c1afd5904d01a20abb682e05e26b 2024-12-06T08:19:31,768 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/24b042a9539048f9a839adb0a69dc7d6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/24b042a9539048f9a839adb0a69dc7d6 2024-12-06T08:19:31,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/e097be9998614a3988353e90f8c3c090 2024-12-06T08:19:31,770 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/084d16ab2a7b4e1c84204c7652835024 2024-12-06T08:19:31,771 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b05d65b6dca648c480c181c04c176ee6 2024-12-06T08:19:31,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/17dc4ab360e5414a9fbf4bbf9597b667 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/17dc4ab360e5414a9fbf4bbf9597b667 2024-12-06T08:19:31,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/deeb2cafaa8b421e85b192ee0996ce94 2024-12-06T08:19:31,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/c5607c3c35604da1827db024c4a7cce2 2024-12-06T08:19:31,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f8b0fe9be04649339527389601e646c6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/cfa011746b41459ba97363a7a9576389, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/487f2123ea9b41be900d08844aabbc9d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f12e31e0e49b4d018c3134044ad99e17, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/53ed1c8a95a64780b3b3a8dec6c848a2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/af6c72b240fb44f2b1a946b3c989ea6d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/678b6cc540704bc9899b85648d22099e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/ef413efdcc7b44f89bba13441edbc475, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/b4aa693090a1455da1c139e52ba18ccf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e6b3909475a246d4a2d8d07d5385f292, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a7d03ce79203462986c0e115ab9f0086, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a684d831c1ac4fa0bba715f9203ae27a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/c8b6596e96b74d3f8a883938c59b1638, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/05f5a2a76d354276ad440b55f279ea6e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/9337f0c7de6a46778525e8eafa4d41bd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5ca61cbd84104b57b2df10b576e56c83, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e834571521af477b8dc9b6319d6e0dd8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/139fb4e990ca408c979d631a0e4714e2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/fe1955b8a2ce48bfbe735aeef19667db, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/2318f013efcf463a92b00300ddf7aa65, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5874d5f386804bd89fdf37db7472af57, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/4c6ecba3bbbf43b488605324cf5a1bdc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/920c567b6bfb402b8bf5ca0b4390c80b] to archive 2024-12-06T08:19:31,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:19:31,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f8b0fe9be04649339527389601e646c6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f8b0fe9be04649339527389601e646c6 2024-12-06T08:19:31,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/cfa011746b41459ba97363a7a9576389 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/cfa011746b41459ba97363a7a9576389 2024-12-06T08:19:31,779 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/487f2123ea9b41be900d08844aabbc9d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/487f2123ea9b41be900d08844aabbc9d 2024-12-06T08:19:31,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f12e31e0e49b4d018c3134044ad99e17 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f12e31e0e49b4d018c3134044ad99e17 2024-12-06T08:19:31,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/53ed1c8a95a64780b3b3a8dec6c848a2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/53ed1c8a95a64780b3b3a8dec6c848a2 2024-12-06T08:19:31,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/af6c72b240fb44f2b1a946b3c989ea6d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/af6c72b240fb44f2b1a946b3c989ea6d 2024-12-06T08:19:31,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/678b6cc540704bc9899b85648d22099e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/678b6cc540704bc9899b85648d22099e 2024-12-06T08:19:31,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/ef413efdcc7b44f89bba13441edbc475 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/ef413efdcc7b44f89bba13441edbc475 2024-12-06T08:19:31,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/b4aa693090a1455da1c139e52ba18ccf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/b4aa693090a1455da1c139e52ba18ccf 2024-12-06T08:19:31,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e6b3909475a246d4a2d8d07d5385f292 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e6b3909475a246d4a2d8d07d5385f292 2024-12-06T08:19:31,787 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a7d03ce79203462986c0e115ab9f0086 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a7d03ce79203462986c0e115ab9f0086 2024-12-06T08:19:31,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a684d831c1ac4fa0bba715f9203ae27a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/a684d831c1ac4fa0bba715f9203ae27a 2024-12-06T08:19:31,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/c8b6596e96b74d3f8a883938c59b1638 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/c8b6596e96b74d3f8a883938c59b1638 2024-12-06T08:19:31,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/05f5a2a76d354276ad440b55f279ea6e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/05f5a2a76d354276ad440b55f279ea6e 2024-12-06T08:19:31,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/9337f0c7de6a46778525e8eafa4d41bd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/9337f0c7de6a46778525e8eafa4d41bd 2024-12-06T08:19:31,792 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5ca61cbd84104b57b2df10b576e56c83 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5ca61cbd84104b57b2df10b576e56c83 2024-12-06T08:19:31,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e834571521af477b8dc9b6319d6e0dd8 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e834571521af477b8dc9b6319d6e0dd8 2024-12-06T08:19:31,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/139fb4e990ca408c979d631a0e4714e2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/139fb4e990ca408c979d631a0e4714e2 2024-12-06T08:19:31,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/fe1955b8a2ce48bfbe735aeef19667db to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/fe1955b8a2ce48bfbe735aeef19667db 2024-12-06T08:19:31,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/2318f013efcf463a92b00300ddf7aa65 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/2318f013efcf463a92b00300ddf7aa65 2024-12-06T08:19:31,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5874d5f386804bd89fdf37db7472af57 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/5874d5f386804bd89fdf37db7472af57 2024-12-06T08:19:31,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/4c6ecba3bbbf43b488605324cf5a1bdc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/4c6ecba3bbbf43b488605324cf5a1bdc 2024-12-06T08:19:31,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/920c567b6bfb402b8bf5ca0b4390c80b to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/920c567b6bfb402b8bf5ca0b4390c80b 2024-12-06T08:19:31,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a817f6bdc25b4addb692dfee14db392e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/34d8cb2ba93c469eb80d215c73b92f9e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/7cc083f858554b8ca22df4d3d54b4064, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/936960218ff24630948998f6ea5ddbe8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/e2e65591150f47c6a8f2cce307b112f3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/47bc1221bd98479991aa4f9cd4290b87, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/01e46483a3f84609a126a88ce1eeb7bd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/5ed7c25a8ad54d3080bb64ca407ad67f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b42dfca9ee9446d3b802b689d150878d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/87cfd7645c9b4f7180817716b5956063, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/eeaa7455a3894205a124972ac3f40efe, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/2ca65c4b99e84a57ab461e39e2d98975, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/52bd9bba7ffb48589e936d4eaf228494, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b1efcb73ba1544c28a3984ce3cd1f3f5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/4b3b486a702c4a48926031c075798230, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dbd1268b7d094e8bae956fd3ce7667bd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/3e2a82b0c9cd403aa1d361645854bb11, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dd284e5f78904452bc7fdc89f65bba11, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/af345b5c97f44f8aa8a8afeefbb6d0a5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/bce1fc1fe7fc4919a2032307804b2d97, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/8795e7924fb84f61a92c951001297231, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/25eaf78fe35441869c04d116677cd6b1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/22cb935da648451697882fb622e7f5c6] to archive 2024-12-06T08:19:31,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:19:31,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a817f6bdc25b4addb692dfee14db392e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a817f6bdc25b4addb692dfee14db392e 2024-12-06T08:19:31,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/34d8cb2ba93c469eb80d215c73b92f9e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/34d8cb2ba93c469eb80d215c73b92f9e 2024-12-06T08:19:31,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/7cc083f858554b8ca22df4d3d54b4064 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/7cc083f858554b8ca22df4d3d54b4064 2024-12-06T08:19:31,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/936960218ff24630948998f6ea5ddbe8 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/936960218ff24630948998f6ea5ddbe8 2024-12-06T08:19:31,806 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/e2e65591150f47c6a8f2cce307b112f3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/e2e65591150f47c6a8f2cce307b112f3 2024-12-06T08:19:31,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/47bc1221bd98479991aa4f9cd4290b87 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/47bc1221bd98479991aa4f9cd4290b87 2024-12-06T08:19:31,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/01e46483a3f84609a126a88ce1eeb7bd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/01e46483a3f84609a126a88ce1eeb7bd 2024-12-06T08:19:31,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/5ed7c25a8ad54d3080bb64ca407ad67f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/5ed7c25a8ad54d3080bb64ca407ad67f 2024-12-06T08:19:31,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b42dfca9ee9446d3b802b689d150878d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b42dfca9ee9446d3b802b689d150878d 2024-12-06T08:19:31,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/87cfd7645c9b4f7180817716b5956063 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/87cfd7645c9b4f7180817716b5956063 2024-12-06T08:19:31,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/eeaa7455a3894205a124972ac3f40efe to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/eeaa7455a3894205a124972ac3f40efe 2024-12-06T08:19:31,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/2ca65c4b99e84a57ab461e39e2d98975 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/2ca65c4b99e84a57ab461e39e2d98975 2024-12-06T08:19:31,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/52bd9bba7ffb48589e936d4eaf228494 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/52bd9bba7ffb48589e936d4eaf228494 2024-12-06T08:19:31,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b1efcb73ba1544c28a3984ce3cd1f3f5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/b1efcb73ba1544c28a3984ce3cd1f3f5 2024-12-06T08:19:31,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/4b3b486a702c4a48926031c075798230 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/4b3b486a702c4a48926031c075798230 2024-12-06T08:19:31,818 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dbd1268b7d094e8bae956fd3ce7667bd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dbd1268b7d094e8bae956fd3ce7667bd 2024-12-06T08:19:31,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/3e2a82b0c9cd403aa1d361645854bb11 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/3e2a82b0c9cd403aa1d361645854bb11 2024-12-06T08:19:31,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dd284e5f78904452bc7fdc89f65bba11 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/dd284e5f78904452bc7fdc89f65bba11 2024-12-06T08:19:31,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/af345b5c97f44f8aa8a8afeefbb6d0a5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/af345b5c97f44f8aa8a8afeefbb6d0a5 2024-12-06T08:19:31,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/bce1fc1fe7fc4919a2032307804b2d97 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/bce1fc1fe7fc4919a2032307804b2d97 2024-12-06T08:19:31,823 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/8795e7924fb84f61a92c951001297231 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/8795e7924fb84f61a92c951001297231 2024-12-06T08:19:31,824 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/25eaf78fe35441869c04d116677cd6b1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/25eaf78fe35441869c04d116677cd6b1 2024-12-06T08:19:31,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/22cb935da648451697882fb622e7f5c6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/22cb935da648451697882fb622e7f5c6 2024-12-06T08:19:31,829 DEBUG 
[RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/recovered.edits/401.seqid, newMaxSeqId=401, maxSeqId=4 2024-12-06T08:19:31,830 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf. 2024-12-06T08:19:31,830 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 03df86e7064722e5116b657f067426bf: 2024-12-06T08:19:31,832 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 03df86e7064722e5116b657f067426bf 2024-12-06T08:19:31,832 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=03df86e7064722e5116b657f067426bf, regionState=CLOSED 2024-12-06T08:19:31,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-06T08:19:31,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 03df86e7064722e5116b657f067426bf, server=b6b797fc3981,38041,1733473111442 in 1.8990 sec 2024-12-06T08:19:31,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-06T08:19:31,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=03df86e7064722e5116b657f067426bf, UNASSIGN in 1.9020 sec 2024-12-06T08:19:31,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-06T08:19:31,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9040 sec 2024-12-06T08:19:31,838 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473171838"}]},"ts":"1733473171838"} 2024-12-06T08:19:31,839 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T08:19:31,841 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T08:19:31,842 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9190 sec 2024-12-06T08:19:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-06T08:19:32,030 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-06T08:19:32,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T08:19:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,032 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T08:19:32,032 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,034 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,036 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/recovered.edits] 2024-12-06T08:19:32,039 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/384415b972f14e0ba28ed0cb65eeb9d1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/384415b972f14e0ba28ed0cb65eeb9d1 2024-12-06T08:19:32,040 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/6eedf5ae2d9d41948a27fa409c25a3e0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/6eedf5ae2d9d41948a27fa409c25a3e0 2024-12-06T08:19:32,042 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6ec0a873a4448728c5a0f8e532330c7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/a6ec0a873a4448728c5a0f8e532330c7 2024-12-06T08:19:32,043 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b0901895c90843b08abe7ad2103d22de to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/b0901895c90843b08abe7ad2103d22de 2024-12-06T08:19:32,044 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/fe69fa7c5a544377b9f43b57f1ffe846 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/A/fe69fa7c5a544377b9f43b57f1ffe846 2024-12-06T08:19:32,046 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/3005f2bbfd2c40b9b193db0bb42359da to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/3005f2bbfd2c40b9b193db0bb42359da 2024-12-06T08:19:32,047 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/89a252ca05444af0b768daa099b7d65e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/89a252ca05444af0b768daa099b7d65e 2024-12-06T08:19:32,048 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/d8b1b4201e6d47c58964b8675675d9a7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/d8b1b4201e6d47c58964b8675675d9a7 2024-12-06T08:19:32,050 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e39a948124a84a6382c082efbe6ebbd6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/e39a948124a84a6382c082efbe6ebbd6 2024-12-06T08:19:32,051 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f6822bef9f13437d8135b163d1338767 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/B/f6822bef9f13437d8135b163d1338767 2024-12-06T08:19:32,053 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/13704beb3a5c4058a0b2d4aebbe8f6a7 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/13704beb3a5c4058a0b2d4aebbe8f6a7 2024-12-06T08:19:32,054 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/35a305277ae74dbb8786b4ba717c4b5d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/35a305277ae74dbb8786b4ba717c4b5d 2024-12-06T08:19:32,055 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/9c189af82d0b43209c2ce0954dedd549 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/9c189af82d0b43209c2ce0954dedd549 2024-12-06T08:19:32,057 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a59fa5e0d18842e1989dd834ac321197 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/a59fa5e0d18842e1989dd834ac321197 2024-12-06T08:19:32,058 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/c215682c0ccb491cae2977846ae19ab2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/C/c215682c0ccb491cae2977846ae19ab2 2024-12-06T08:19:32,061 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/recovered.edits/401.seqid to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf/recovered.edits/401.seqid 2024-12-06T08:19:32,061 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,062 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T08:19:32,062 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T08:19:32,063 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-06T08:19:32,066 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206049b47e691684b07bfb04abace130470_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206049b47e691684b07bfb04abace130470_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,067 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206051a0de44a704d0c99131764d3a4cbb8_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206051a0de44a704d0c99131764d3a4cbb8_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,069 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206065db5e633e342ecb532f7ef19c29b1a_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206065db5e633e342ecb532f7ef19c29b1a_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,070 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060e4303886ee4410f8ae66064a6a05c22_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060e4303886ee4410f8ae66064a6a05c22_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,072 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206192700510ecb4c17807b33feac50b085_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206192700510ecb4c17807b33feac50b085_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,073 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120619d41f1081154f219f680650b057f2d2_03df86e7064722e5116b657f067426bf to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120619d41f1081154f219f680650b057f2d2_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,074 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120619f92fcd6008410f9af73c2607b248ba_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120619f92fcd6008410f9af73c2607b248ba_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,075 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061c68343e0a7c483c803402ac131f6bbc_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412061c68343e0a7c483c803402ac131f6bbc_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,077 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206206292d7e565498c9927d5073fcadf2e_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206206292d7e565498c9927d5073fcadf2e_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,078 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206295709b17e774cb5af9423cfa53b9828_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206295709b17e774cb5af9423cfa53b9828_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,079 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063ecf51427fdd4a64943901e5d88f33be_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063ecf51427fdd4a64943901e5d88f33be_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,080 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120671df75cf39014a869d6a2bc803795827_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120671df75cf39014a869d6a2bc803795827_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,081 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068857c7f46ac34e8092c28b9503e1c2dc_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068857c7f46ac34e8092c28b9503e1c2dc_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,083 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a0cae04d2d6e442c85c6238991105ba1_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a0cae04d2d6e442c85c6238991105ba1_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,084 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ad6ecff5230d4b55826c3a8df9fa3897_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ad6ecff5230d4b55826c3a8df9fa3897_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,085 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d6df13e497284926b06c140ee9543cbf_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d6df13e497284926b06c140ee9543cbf_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,087 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d7f29140dd0d4bba8f75b8ad99232055_03df86e7064722e5116b657f067426bf to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d7f29140dd0d4bba8f75b8ad99232055_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,088 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d8470dfe4a6c4c26a1a0f0840cb771ec_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d8470dfe4a6c4c26a1a0f0840cb771ec_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,089 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206da5e2f3cd8a943b5a9cc927345eb66d5_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206da5e2f3cd8a943b5a9cc927345eb66d5_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,090 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e1d67c866ee74023bcb580c828870195_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e1d67c866ee74023bcb580c828870195_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,092 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206fe23a7909bc448edbc5fed1baff7e7ca_03df86e7064722e5116b657f067426bf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206fe23a7909bc448edbc5fed1baff7e7ca_03df86e7064722e5116b657f067426bf 2024-12-06T08:19:32,093 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T08:19:32,095 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,098 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T08:19:32,100 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
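
Note on the HFileArchiver entries above: each "Archived from ... to ..." line records one store file being moved from the region's data directory to the mirrored path under archive/ on the same HDFS instance. Below is a minimal sketch of that move using only the public Hadoop FileSystem API; the class and method names are illustrative, not HBase's actual HFileArchiver implementation (which additionally handles name collisions and retries).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  // Moves one store file to its archive location. Both paths are taken as given,
  // since the log prints the source and destination explicitly for every file.
  static boolean archiveStoreFile(FileSystem fs, Path src, Path dst) throws IOException {
    fs.mkdirs(dst.getParent());   // ensure .../archive/data/<ns>/<table>/<region>/<cf>/ exists
    return fs.rename(src, dst);   // the "Archived from ... to ..." step is essentially a rename
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(java.net.URI.create("hdfs://localhost:43731"), conf);
    Path src = new Path(args[0]); // e.g. .../data/default/TestAcidGuarantees/<region>/C/<hfile>
    Path dst = new Path(args[1]); // e.g. .../archive/data/default/TestAcidGuarantees/<region>/C/<hfile>
    System.out.println(archiveStoreFile(fs, src, dst));
  }
}
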
2024-12-06T08:19:32,101 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,101 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T08:19:32,101 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733473172101"}]},"ts":"9223372036854775807"} 2024-12-06T08:19:32,103 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T08:19:32,103 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 03df86e7064722e5116b657f067426bf, NAME => 'TestAcidGuarantees,,1733473144655.03df86e7064722e5116b657f067426bf.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T08:19:32,103 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-06T08:19:32,103 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733473172103"}]},"ts":"9223372036854775807"} 2024-12-06T08:19:32,105 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T08:19:32,107 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 77 msec 2024-12-06T08:19:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-06T08:19:32,133 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-06T08:19:32,144 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=242 (was 238) Potentially hanging thread: hconnection-0x5602a74-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1761172948_22 at /127.0.0.1:35700 [Waiting for operation #1009] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1761172948_22 at /127.0.0.1:35602 [Waiting for operation #1046] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-705260017_22 at /127.0.0.1:35728 [Waiting for operation #936] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-705260017_22 at /127.0.0.1:35742 [Waiting for operation #1002] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=465 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=424 (was 347) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7778 (was 8234) 2024-12-06T08:19:32,153 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=242, OpenFileDescriptor=465, MaxFileDescriptor=1048576, SystemLoadAverage=424, ProcessCount=11, AvailableMemoryMB=7777 2024-12-06T08:19:32,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-06T08:19:32,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:19:32,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T08:19:32,157 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:19:32,157 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:32,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-06T08:19:32,158 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:19:32,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-06T08:19:32,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742071_1247 (size=963) 2024-12-06T08:19:32,171 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:19:32,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742072_1248 (size=53) 2024-12-06T08:19:32,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:19:32,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5310d6bf57bb8b709e1aec9222644a3d, disabling compactions & flushes 2024-12-06T08:19:32,181 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. after waiting 0 ms 2024-12-06T08:19:32,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,181 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:32,182 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:19:32,182 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733473172182"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473172182"}]},"ts":"1733473172182"} 2024-12-06T08:19:32,183 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T08:19:32,184 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:19:32,184 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473172184"}]},"ts":"1733473172184"} 2024-12-06T08:19:32,185 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T08:19:32,189 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, ASSIGN}] 2024-12-06T08:19:32,190 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, ASSIGN 2024-12-06T08:19:32,190 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:19:32,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-06T08:19:32,341 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=5310d6bf57bb8b709e1aec9222644a3d, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:19:32,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-06T08:19:32,494 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,497 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:32,497 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:19:32,497 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,497 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:19:32,498 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,498 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,499 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,500 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:32,500 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5310d6bf57bb8b709e1aec9222644a3d columnFamilyName A 2024-12-06T08:19:32,500 DEBUG [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:32,501 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.HStore(327): Store=5310d6bf57bb8b709e1aec9222644a3d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:32,501 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,502 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:32,502 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5310d6bf57bb8b709e1aec9222644a3d columnFamilyName B 2024-12-06T08:19:32,502 DEBUG [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:32,502 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.HStore(327): Store=5310d6bf57bb8b709e1aec9222644a3d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:32,502 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,503 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:19:32,503 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5310d6bf57bb8b709e1aec9222644a3d columnFamilyName C 2024-12-06T08:19:32,503 DEBUG [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:19:32,504 INFO [StoreOpener-5310d6bf57bb8b709e1aec9222644a3d-1 {}] regionserver.HStore(327): Store=5310d6bf57bb8b709e1aec9222644a3d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:19:32,504 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,505 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,505 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,506 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:19:32,507 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,509 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:19:32,510 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 5310d6bf57bb8b709e1aec9222644a3d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72879891, jitterRate=0.08599500358104706}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:19:32,510 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:32,511 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., pid=70, masterSystemTime=1733473172494 2024-12-06T08:19:32,512 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,512 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:32,512 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=5310d6bf57bb8b709e1aec9222644a3d, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-06T08:19:32,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 in 171 msec 2024-12-06T08:19:32,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-06T08:19:32,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, ASSIGN in 325 msec 2024-12-06T08:19:32,516 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:19:32,517 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473172516"}]},"ts":"1733473172516"} 2024-12-06T08:19:32,517 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T08:19:32,520 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:19:32,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 365 msec 2024-12-06T08:19:32,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-06T08:19:32,763 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-06T08:19:32,766 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b70f48f to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f66057f 2024-12-06T08:19:32,769 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53bfce45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,770 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,771 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,772 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:19:32,773 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46388, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:19:32,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58341641 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17b6adc5 2024-12-06T08:19:32,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a569490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,779 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-12-06T08:19:32,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,783 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-12-06T08:19:32,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,786 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-12-06T08:19:32,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,790 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-12-06T08:19:32,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,794 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-12-06T08:19:32,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,797 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-12-06T08:19:32,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,801 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-12-06T08:19:32,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,804 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-12-06T08:19:32,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,809 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-12-06T08:19:32,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:19:32,822 DEBUG [hconnection-0x3845a8b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,822 DEBUG [hconnection-0x186ec7b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,822 DEBUG [hconnection-0x63cfb1e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,823 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,823 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,824 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,824 DEBUG [hconnection-0x669d2816-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:32,825 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45512, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-06T08:19:32,826 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:32,827 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:32,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:32,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T08:19:32,833 DEBUG [hconnection-0x4e33b366-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,834 DEBUG [hconnection-0xae735aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,835 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,835 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,839 DEBUG [hconnection-0x6c8826ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,840 DEBUG [hconnection-0x2e9bb9b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,840 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,841 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45550, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 
5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:32,841 DEBUG [hconnection-0x70c9357d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T08:19:32,842 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,842 DEBUG [hconnection-0x60763863-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:19:32,843 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:19:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:32,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473232864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473232864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473232867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473232867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473232868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/04889eb3d522417f86f8670cddc8a602 is 50, key is test_row_0/A:col10/1733473172839/Put/seqid=0 2024-12-06T08:19:32,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742073_1249 (size=12001) 2024-12-06T08:19:32,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T08:19:32,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473232970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473232970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473232972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473232972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473232973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:32,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-06T08:19:32,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:32,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:32,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:32,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:32,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T08:19:33,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-06T08:19:33,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:33,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473233171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473233172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473233174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473233175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473233175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-06T08:19:33,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:33,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/04889eb3d522417f86f8670cddc8a602 2024-12-06T08:19:33,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1f566ef2322a4c0ab9b95ed5d8995842 is 50, key is test_row_0/B:col10/1733473172839/Put/seqid=0 2024-12-06T08:19:33,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742074_1250 (size=12001) 2024-12-06T08:19:33,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1f566ef2322a4c0ab9b95ed5d8995842 2024-12-06T08:19:33,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/43a01b8ec4dc4640a439150ae824d0db is 50, key is test_row_0/C:col10/1733473172839/Put/seqid=0 2024-12-06T08:19:33,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742075_1251 (size=12001) 2024-12-06T08:19:33,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/43a01b8ec4dc4640a439150ae824d0db 2024-12-06T08:19:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T08:19:33,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/04889eb3d522417f86f8670cddc8a602 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/04889eb3d522417f86f8670cddc8a602 2024-12-06T08:19:33,439 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/04889eb3d522417f86f8670cddc8a602, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T08:19:33,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1f566ef2322a4c0ab9b95ed5d8995842 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1f566ef2322a4c0ab9b95ed5d8995842 2024-12-06T08:19:33,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-06T08:19:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1f566ef2322a4c0ab9b95ed5d8995842, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T08:19:33,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/43a01b8ec4dc4640a439150ae824d0db as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/43a01b8ec4dc4640a439150ae824d0db 2024-12-06T08:19:33,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/43a01b8ec4dc4640a439150ae824d0db, entries=150, sequenceid=13, filesize=11.7 K 2024-12-06T08:19:33,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 5310d6bf57bb8b709e1aec9222644a3d in 617ms, sequenceid=13, compaction requested=false 2024-12-06T08:19:33,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:33,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:33,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-06T08:19:33,478 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:33,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:33,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:33,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473233481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473233481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473233482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473233482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473233483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/74d390e8034f4c628b2a9fb98ef3a01d is 50, key is test_row_0/A:col10/1733473173477/Put/seqid=0 2024-12-06T08:19:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742076_1252 (size=12001) 2024-12-06T08:19:33,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/74d390e8034f4c628b2a9fb98ef3a01d 2024-12-06T08:19:33,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/772f33b1b9264136a2d54c136307cf99 is 50, key is test_row_0/B:col10/1733473173477/Put/seqid=0 2024-12-06T08:19:33,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742077_1253 (size=12001) 2024-12-06T08:19:33,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/772f33b1b9264136a2d54c136307cf99 2024-12-06T08:19:33,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/62585c00485445bab2309c8ba366bd58 is 50, key is test_row_0/C:col10/1733473173477/Put/seqid=0 2024-12-06T08:19:33,588 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473233585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473233586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473233593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473233593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473233593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-06T08:19:33,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:33,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:33,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742078_1254 (size=12001) 2024-12-06T08:19:33,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/62585c00485445bab2309c8ba366bd58 2024-12-06T08:19:33,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/74d390e8034f4c628b2a9fb98ef3a01d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/74d390e8034f4c628b2a9fb98ef3a01d 2024-12-06T08:19:33,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/74d390e8034f4c628b2a9fb98ef3a01d, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T08:19:33,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/772f33b1b9264136a2d54c136307cf99 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/772f33b1b9264136a2d54c136307cf99 2024-12-06T08:19:33,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/772f33b1b9264136a2d54c136307cf99, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T08:19:33,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/62585c00485445bab2309c8ba366bd58 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/62585c00485445bab2309c8ba366bd58 2024-12-06T08:19:33,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/62585c00485445bab2309c8ba366bd58, entries=150, sequenceid=41, filesize=11.7 K 2024-12-06T08:19:33,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 5310d6bf57bb8b709e1aec9222644a3d in 149ms, sequenceid=41, compaction requested=false 2024-12-06T08:19:33,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:33,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-06T08:19:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:33,751 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T08:19:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:33,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b0dba054e79842dbb9534254850f0f47 is 50, key is test_row_0/A:col10/1733473173482/Put/seqid=0 2024-12-06T08:19:33,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742079_1255 (size=12001) 2024-12-06T08:19:33,783 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b0dba054e79842dbb9534254850f0f47 2024-12-06T08:19:33,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/5ab722c5653e42e0aa4d347c604a01eb is 50, key is test_row_0/B:col10/1733473173482/Put/seqid=0 2024-12-06T08:19:33,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:33,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742080_1256 (size=12001) 2024-12-06T08:19:33,803 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/5ab722c5653e42e0aa4d347c604a01eb 2024-12-06T08:19:33,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/b354f828cd2440dea84b72e095d33801 is 50, key is test_row_0/C:col10/1733473173482/Put/seqid=0 2024-12-06T08:19:33,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742081_1257 (size=12001) 2024-12-06T08:19:33,823 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/b354f828cd2440dea84b72e095d33801 2024-12-06T08:19:33,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b0dba054e79842dbb9534254850f0f47 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b0dba054e79842dbb9534254850f0f47 2024-12-06T08:19:33,846 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b0dba054e79842dbb9534254850f0f47, entries=150, sequenceid=49, filesize=11.7 K 2024-12-06T08:19:33,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/5ab722c5653e42e0aa4d347c604a01eb as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5ab722c5653e42e0aa4d347c604a01eb 2024-12-06T08:19:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473233841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473233841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473233844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,852 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5ab722c5653e42e0aa4d347c604a01eb, entries=150, sequenceid=49, filesize=11.7 K 2024-12-06T08:19:33,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473233847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/b354f828cd2440dea84b72e095d33801 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/b354f828cd2440dea84b72e095d33801 2024-12-06T08:19:33,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473233849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,863 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/b354f828cd2440dea84b72e095d33801, entries=150, sequenceid=49, filesize=11.7 K 2024-12-06T08:19:33,864 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 5310d6bf57bb8b709e1aec9222644a3d in 113ms, sequenceid=49, compaction requested=true 2024-12-06T08:19:33,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:33,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:33,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-06T08:19:33,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-06T08:19:33,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-06T08:19:33,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0380 sec 2024-12-06T08:19:33,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.0420 sec 2024-12-06T08:19:33,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-06T08:19:33,931 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-06T08:19:33,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:33,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-06T08:19:33,937 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:33,937 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:33,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:33,954 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-06T08:19:33,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:33,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:33,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:33,954 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:33,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473233957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473233958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473233960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473233962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473233962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:33,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b6e712398049452eb0659e0441db9896 is 50, key is test_row_0/A:col10/1733473173828/Put/seqid=0 2024-12-06T08:19:34,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742082_1258 (size=12001) 2024-12-06T08:19:34,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:34,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473234076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473234081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473234081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473234081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,089 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:34,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:34,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:34,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473234164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,243 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:34,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:34,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:34,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473234282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473234284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473234285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473234285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,396 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:34,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:34,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
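The repeated pid=74 sequence above (dispatch of FlushRegionCallable, "NOT flushing ... as already flushing", IOException "Unable to complete flush", "Remote procedure failed") is the master redispatching the same flush procedure until the region server can actually run it, that is, until the flush already in progress finishes. The loop below is only a schematic model of that redispatch pattern under an assumed simple backoff; the interface and names are invented and do not reflect the procedure-v2 framework's API.

// Schematic model (invented names, not HBase's procedure-v2 API) of the redispatch
// pattern visible above: keep re-sending the flush request until the region server
// stops rejecting it because another flush of the same region is still running.
import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class FlushRedispatchSketch {
  /** Stand-in for executing the remote FlushRegionCallable on the region server. */
  interface RegionServerFlush {
    void flushRegion(String encodedRegionName) throws IOException;
  }

  static void runUntilFlushed(RegionServerFlush rs, String encodedRegionName)
      throws InterruptedException {
    long backoffMs = 150;                               // assumed redispatch delay
    while (true) {
      try {
        rs.flushRegion(encodedRegionName);              // corresponds to pid=74 being executed
        return;                                         // flush ran to completion
      } catch (IOException alreadyFlushing) {
        // "Unable to complete flush ... as already flushing": report the failure and
        // try again a little later, as the log shows happening repeatedly.
        TimeUnit.MILLISECONDS.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 1_000);
      }
    }
  }
}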
2024-12-06T08:19:34,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b6e712398049452eb0659e0441db9896 2024-12-06T08:19:34,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f6decd799720485ea36e98540a839648 is 50, key is test_row_0/B:col10/1733473173828/Put/seqid=0 2024-12-06T08:19:34,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742083_1259 (size=12001) 2024-12-06T08:19:34,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f6decd799720485ea36e98540a839648 2024-12-06T08:19:34,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/570bc109770f47a69433d11fe0dcf2b4 is 50, key is test_row_0/C:col10/1733473173828/Put/seqid=0 2024-12-06T08:19:34,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742084_1260 (size=12001) 2024-12-06T08:19:34,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473234466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,549 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:34,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:34,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:34,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:34,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473234587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473234588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473234588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:34,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473234588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,703 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:34,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:34,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,703 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,856 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:34,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:34,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:34,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/570bc109770f47a69433d11fe0dcf2b4 2024-12-06T08:19:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b6e712398049452eb0659e0441db9896 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b6e712398049452eb0659e0441db9896 2024-12-06T08:19:34,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b6e712398049452eb0659e0441db9896, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T08:19:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f6decd799720485ea36e98540a839648 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f6decd799720485ea36e98540a839648 2024-12-06T08:19:34,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f6decd799720485ea36e98540a839648, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T08:19:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/570bc109770f47a69433d11fe0dcf2b4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/570bc109770f47a69433d11fe0dcf2b4 2024-12-06T08:19:34,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/570bc109770f47a69433d11fe0dcf2b4, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T08:19:34,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 5310d6bf57bb8b709e1aec9222644a3d in 925ms, sequenceid=78, compaction requested=true 2024-12-06T08:19:34,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:34,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:34,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:34,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:34,880 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:34,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:34,880 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:34,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:34,880 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:34,881 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:34,881 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:34,881 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:34,881 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:34,881 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:34,881 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
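The flush numbers above are internally consistent: each of the three column families (A, B and C) reported "Flushed memstore data size=58.14 KB at sequenceid=78", and 3 × 58.14 KB is approximately the "~174.43 KB/178620" reported for the whole region, while "currentSize=26.84 KB" is newer writes that arrived during the 925 ms flush and stayed in the memstore. Each flushed HFile is 12001 bytes (reported as 11.7 K, 150 entries), and because every store now holds four HFiles, the flusher queues minor compactions for A, B and C ("compaction requested=true").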
2024-12-06T08:19:34,881 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/04889eb3d522417f86f8670cddc8a602, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/74d390e8034f4c628b2a9fb98ef3a01d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b0dba054e79842dbb9534254850f0f47, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b6e712398049452eb0659e0441db9896] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=46.9 K 2024-12-06T08:19:34,881 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1f566ef2322a4c0ab9b95ed5d8995842, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/772f33b1b9264136a2d54c136307cf99, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5ab722c5653e42e0aa4d347c604a01eb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f6decd799720485ea36e98540a839648] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=46.9 K 2024-12-06T08:19:34,882 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f566ef2322a4c0ab9b95ed5d8995842, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733473172837 2024-12-06T08:19:34,882 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04889eb3d522417f86f8670cddc8a602, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733473172837 2024-12-06T08:19:34,883 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 772f33b1b9264136a2d54c136307cf99, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473172863 2024-12-06T08:19:34,883 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74d390e8034f4c628b2a9fb98ef3a01d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473172863 2024-12-06T08:19:34,883 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ab722c5653e42e0aa4d347c604a01eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733473173479 2024-12-06T08:19:34,883 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
b0dba054e79842dbb9534254850f0f47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733473173479 2024-12-06T08:19:34,884 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f6decd799720485ea36e98540a839648, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473173828 2024-12-06T08:19:34,884 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6e712398049452eb0659e0441db9896, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473173828 2024-12-06T08:19:34,896 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#213 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:34,896 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:34,897 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/911723aeb2074f69a4727570697bd869 is 50, key is test_row_0/A:col10/1733473173828/Put/seqid=0 2024-12-06T08:19:34,897 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/6e8836bcdfd8426cb8d0bc8a4e07ec88 is 50, key is test_row_0/B:col10/1733473173828/Put/seqid=0 2024-12-06T08:19:34,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742086_1262 (size=12139) 2024-12-06T08:19:34,937 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/6e8836bcdfd8426cb8d0bc8a4e07ec88 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/6e8836bcdfd8426cb8d0bc8a4e07ec88 2024-12-06T08:19:34,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742085_1261 (size=12139) 2024-12-06T08:19:34,948 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 6e8836bcdfd8426cb8d0bc8a4e07ec88(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:34,948 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:34,948 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=12, startTime=1733473174880; duration=0sec 2024-12-06T08:19:34,948 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/911723aeb2074f69a4727570697bd869 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/911723aeb2074f69a4727570697bd869 2024-12-06T08:19:34,948 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:34,948 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:34,949 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:34,950 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:34,951 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:34,952 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:34,952 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/43a01b8ec4dc4640a439150ae824d0db, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/62585c00485445bab2309c8ba366bd58, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/b354f828cd2440dea84b72e095d33801, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/570bc109770f47a69433d11fe0dcf2b4] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=46.9 K 2024-12-06T08:19:34,952 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 43a01b8ec4dc4640a439150ae824d0db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733473172837 2024-12-06T08:19:34,953 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 62585c00485445bab2309c8ba366bd58, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473172863 2024-12-06T08:19:34,953 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into 911723aeb2074f69a4727570697bd869(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
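The three minor compactions above were queued by the flusher itself ("Small Compaction requested: system; Because: MemStoreFlusher.0"), and the ExploringCompactionPolicy folded the four ~11.7 K flush files of each store into a single ~11.9 K file. For reference only, the same work can be requested on demand through the client Admin API; this is a minimal sketch, assuming a reachable cluster with default configuration, and it is not part of the test run itself.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.CompactionState;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CompactionProbe {
    public static void main(String[] args) throws Exception {
      TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log above
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        admin.majorCompact(table);                               // ask the region server to rewrite all store files
        CompactionState state = admin.getCompactionState(table); // NONE, MINOR, MAJOR, or MAJOR_AND_MINOR
        System.out.println("compaction state: " + state);
      }
    }
  }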
2024-12-06T08:19:34,953 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:34,953 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=12, startTime=1733473174880; duration=0sec 2024-12-06T08:19:34,953 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:34,953 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:34,954 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b354f828cd2440dea84b72e095d33801, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733473173479 2024-12-06T08:19:34,955 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 570bc109770f47a69433d11fe0dcf2b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473173828 2024-12-06T08:19:34,974 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#215 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:34,974 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/d9023e2e80104dd6a2f577ee9c6b4c93 is 50, key is test_row_0/C:col10/1733473173828/Put/seqid=0 2024-12-06T08:19:34,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:34,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:34,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742087_1263 (size=12139) 
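The "FLUSHING TO DISK ... store=A/B/C" and "Swapping pipeline suffix" lines above come from CompactingMemStore, i.e. the table's families use in-memory compaction. Below is a minimal sketch of creating a table whose families opt into that memstore; the BASIC policy and the class name are assumptions, since the log does not show how TestAcidGuarantees actually creates its table.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.MemoryCompactionPolicy;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateCompactingMemStoreTable {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] {"A", "B", "C"}) {           // the three families flushed in the log
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)     // assumed policy; the test may use another
              .build());
        }
        admin.createTable(builder.build());
      }
    }
  }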
2024-12-06T08:19:34,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b5991595fe4f4aa6b9c9b943674e33b6 is 50, key is test_row_0/A:col10/1733473174978/Put/seqid=0 2024-12-06T08:19:35,000 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/d9023e2e80104dd6a2f577ee9c6b4c93 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d9023e2e80104dd6a2f577ee9c6b4c93 2024-12-06T08:19:35,009 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:35,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:35,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:35,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,023 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into d9023e2e80104dd6a2f577ee9c6b4c93(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:35,023 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:35,023 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=12, startTime=1733473174880; duration=0sec 2024-12-06T08:19:35,024 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:35,024 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742088_1264 (size=14341) 2024-12-06T08:19:35,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b5991595fe4f4aa6b9c9b943674e33b6 2024-12-06T08:19:35,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4bccc054396b4cef978bb8faf3997662 is 50, key is test_row_0/B:col10/1733473174978/Put/seqid=0 2024-12-06T08:19:35,055 DEBUG [master/b6b797fc3981:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ad6c9f95f0b663a6596ce60f0a457f00 changed from -1.0 to 0.0, refreshing cache 2024-12-06T08:19:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:35,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473235089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473235092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473235092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473235092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473235096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742089_1265 (size=12001) 2024-12-06T08:19:35,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4bccc054396b4cef978bb8faf3997662 2024-12-06T08:19:35,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/fafae1f109784ed8b9ef64d557499757 is 50, key is test_row_0/C:col10/1733473174978/Put/seqid=0 2024-12-06T08:19:35,163 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:35,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:35,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742090_1266 (size=12001) 2024-12-06T08:19:35,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473235192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,317 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:35,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:35,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
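The "Over memstore limit=512.0 K" rejections above are HRegion.checkResources blocking new writes once a region's memstore exceeds its blocking threshold, which HBase derives as the memstore flush size multiplied by the block multiplier. A minimal configuration sketch follows; the 128 KB / 4 values are purely illustrative, chosen only because their product matches the 512 K limit in the log, and are not taken from the test's actual settings.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class MemStoreLimitConfig {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Blocking threshold = flush size x block multiplier; puts above it fail with RegionTooBusyException.
      conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // hypothetical 128 KB flush size
      conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // 4 x 128 KB = 512 KB blocking limit
      System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1));
    }
  }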
2024-12-06T08:19:35,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473235394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:35,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:35,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:35,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
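While the memstore stays above that limit, the put RPCs above keep failing with RegionTooBusyException and the caller's deadline keeps moving forward (callId 63, 65, 67 on the same connection). The stock HBase client already retries such failures internally; the sketch below only illustrates an explicit backoff loop a caller could layer on top, and the row/value content and retry parameters are made up for the example.

  import java.io.IOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BackoffWriter {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"))   // row and column taken from the log; value is dummy
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy-value"));
        long backoffMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            break;                                       // write accepted
          } catch (IOException e) {                      // e.g. RegionTooBusyException once client retries give up
            Thread.sleep(backoffMs);                     // back off and let the flush drain the memstore
            backoffMs *= 2;
          }
        }
      }
    }
  }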
2024-12-06T08:19:35,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:35,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/fafae1f109784ed8b9ef64d557499757 2024-12-06T08:19:35,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b5991595fe4f4aa6b9c9b943674e33b6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b5991595fe4f4aa6b9c9b943674e33b6 2024-12-06T08:19:35,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b5991595fe4f4aa6b9c9b943674e33b6, entries=200, sequenceid=91, filesize=14.0 K 2024-12-06T08:19:35,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4bccc054396b4cef978bb8faf3997662 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bccc054396b4cef978bb8faf3997662 2024-12-06T08:19:35,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bccc054396b4cef978bb8faf3997662, entries=150, sequenceid=91, 
filesize=11.7 K 2024-12-06T08:19:35,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/fafae1f109784ed8b9ef64d557499757 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fafae1f109784ed8b9ef64d557499757 2024-12-06T08:19:35,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fafae1f109784ed8b9ef64d557499757, entries=150, sequenceid=91, filesize=11.7 K 2024-12-06T08:19:35,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5310d6bf57bb8b709e1aec9222644a3d in 622ms, sequenceid=91, compaction requested=false 2024-12-06T08:19:35,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:35,624 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-06T08:19:35,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
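The flush entries above were driven by a FlushRegionProcedure dispatched from the master (pid=74 under parent pid=73), and the RegionTooBusyException entries that follow are the region refusing new mutations while its memstore sits above the blocking limit (the "Over memstore limit=512.0 K" figure presumably being the test's reduced hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The sketch below is a minimal, illustrative client, not part of the test: it triggers the same table flush through the public Admin API and writes one cell shaped like the test's test_row_0/A:col10 rows, backing off when the region reports it is too busy. The class name and cell value are hypothetical, and in practice the HBase client usually retries RegionTooBusyException internally before surfacing it, so the manual loop only makes that failure path explicit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetryExample {  // hypothetical class name, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tn)) {

      // Ask the master to flush every region of the table; this takes the same
      // FlushTableProcedure -> FlushRegionProcedure path recorded in the log.
      admin.flush(tn);

      // Write one cell shaped like the test's rows (row test_row_0, family A,
      // qualifier col10), backing off if the region is over its memstore limit.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                      // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);    // give MemStoreFlusher time to drain the memstore
          backoffMs *= 2;             // simple exponential backoff
        }
      }
    }
  }
}

With default client settings a persistently busy region would more likely surface as a RetriesExhaustedException after the client's own retries are used up; hbase.client.retries.number and hbase.client.pause govern how quickly that happens.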
2024-12-06T08:19:35,625 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:19:35,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:35,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:35,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:35,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:35,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:35,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:35,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/def887f0d7424931a5d6ebe4fb0ed61e is 50, key is test_row_0/A:col10/1733473175076/Put/seqid=0 2024-12-06T08:19:35,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742091_1267 (size=12001) 2024-12-06T08:19:35,643 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/def887f0d7424931a5d6ebe4fb0ed61e 2024-12-06T08:19:35,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4bd8614236e14fa0a92486217567a1dc is 50, key is test_row_0/B:col10/1733473175076/Put/seqid=0 2024-12-06T08:19:35,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742092_1268 (size=12001) 2024-12-06T08:19:35,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:35,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:35,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473235717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:35,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473235819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473236022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,058 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4bd8614236e14fa0a92486217567a1dc 2024-12-06T08:19:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:36,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/1036540952144fa6a3acf41f1c02ab82 is 50, key is test_row_0/C:col10/1733473175076/Put/seqid=0 2024-12-06T08:19:36,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742093_1269 (size=12001) 2024-12-06T08:19:36,101 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/1036540952144fa6a3acf41f1c02ab82 2024-12-06T08:19:36,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473236098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473236100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473236103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473236106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/def887f0d7424931a5d6ebe4fb0ed61e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/def887f0d7424931a5d6ebe4fb0ed61e 2024-12-06T08:19:36,116 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/def887f0d7424931a5d6ebe4fb0ed61e, entries=150, sequenceid=117, filesize=11.7 K 2024-12-06T08:19:36,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4bd8614236e14fa0a92486217567a1dc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bd8614236e14fa0a92486217567a1dc 2024-12-06T08:19:36,124 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bd8614236e14fa0a92486217567a1dc, entries=150, sequenceid=117, filesize=11.7 K 2024-12-06T08:19:36,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/1036540952144fa6a3acf41f1c02ab82 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/1036540952144fa6a3acf41f1c02ab82 2024-12-06T08:19:36,137 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/1036540952144fa6a3acf41f1c02ab82, entries=150, sequenceid=117, filesize=11.7 K 2024-12-06T08:19:36,138 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5310d6bf57bb8b709e1aec9222644a3d in 513ms, sequenceid=117, compaction requested=true 2024-12-06T08:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-06T08:19:36,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-06T08:19:36,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-06T08:19:36,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2030 sec 2024-12-06T08:19:36,145 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 2.2110 sec 2024-12-06T08:19:36,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:36,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T08:19:36,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:36,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:36,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:36,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:36,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:36,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:36,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/4cff005d68b541ecbd1941222dcd720c is 50, key is test_row_0/A:col10/1733473175706/Put/seqid=0 2024-12-06T08:19:36,382 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742094_1270 (size=12051) 2024-12-06T08:19:36,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473236410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473236514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473236718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:36,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/4cff005d68b541ecbd1941222dcd720c 2024-12-06T08:19:36,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/34fe811edc044ca4adb6eabdeb1a7859 is 50, key is test_row_0/B:col10/1733473175706/Put/seqid=0 2024-12-06T08:19:36,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742095_1271 (size=12051) 2024-12-06T08:19:36,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/34fe811edc044ca4adb6eabdeb1a7859 2024-12-06T08:19:36,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/2efa7f55835a4bc8b7cb759bd42ef431 is 50, key is test_row_0/C:col10/1733473175706/Put/seqid=0 2024-12-06T08:19:36,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742096_1272 (size=12051) 2024-12-06T08:19:37,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:37,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473237022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:37,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/2efa7f55835a4bc8b7cb759bd42ef431 2024-12-06T08:19:37,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/4cff005d68b541ecbd1941222dcd720c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4cff005d68b541ecbd1941222dcd720c 2024-12-06T08:19:37,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4cff005d68b541ecbd1941222dcd720c, entries=150, sequenceid=129, filesize=11.8 K 2024-12-06T08:19:37,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/34fe811edc044ca4adb6eabdeb1a7859 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/34fe811edc044ca4adb6eabdeb1a7859 2024-12-06T08:19:37,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/34fe811edc044ca4adb6eabdeb1a7859, entries=150, sequenceid=129, filesize=11.8 K 2024-12-06T08:19:37,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/2efa7f55835a4bc8b7cb759bd42ef431 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/2efa7f55835a4bc8b7cb759bd42ef431 2024-12-06T08:19:37,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/2efa7f55835a4bc8b7cb759bd42ef431, entries=150, sequenceid=129, filesize=11.8 K 2024-12-06T08:19:37,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 5310d6bf57bb8b709e1aec9222644a3d in 989ms, sequenceid=129, compaction requested=true 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:37,315 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:37,315 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:37,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:37,317 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:37,317 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50532 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:37,317 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:37,317 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:37,317 INFO 
[RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:37,317 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/911723aeb2074f69a4727570697bd869, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b5991595fe4f4aa6b9c9b943674e33b6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/def887f0d7424931a5d6ebe4fb0ed61e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4cff005d68b541ecbd1941222dcd720c] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=49.3 K 2024-12-06T08:19:37,318 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 911723aeb2074f69a4727570697bd869, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473173828 2024-12-06T08:19:37,318 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:37,318 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5991595fe4f4aa6b9c9b943674e33b6, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733473174975 2024-12-06T08:19:37,318 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/6e8836bcdfd8426cb8d0bc8a4e07ec88, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bccc054396b4cef978bb8faf3997662, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bd8614236e14fa0a92486217567a1dc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/34fe811edc044ca4adb6eabdeb1a7859] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=47.1 K 2024-12-06T08:19:37,319 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting def887f0d7424931a5d6ebe4fb0ed61e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733473175076 2024-12-06T08:19:37,319 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e8836bcdfd8426cb8d0bc8a4e07ec88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=78, earliestPutTs=1733473173828 2024-12-06T08:19:37,319 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cff005d68b541ecbd1941222dcd720c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473175706 2024-12-06T08:19:37,319 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bccc054396b4cef978bb8faf3997662, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733473174975 2024-12-06T08:19:37,320 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bd8614236e14fa0a92486217567a1dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733473175076 2024-12-06T08:19:37,320 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 34fe811edc044ca4adb6eabdeb1a7859, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473175706 2024-12-06T08:19:37,329 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:37,330 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/d38b36100093481c971b971015756f07 is 50, key is test_row_0/A:col10/1733473175706/Put/seqid=0 2024-12-06T08:19:37,331 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:37,331 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1e37ad58156f430bb2411e290dbc46b0 is 50, key is test_row_0/B:col10/1733473175706/Put/seqid=0 2024-12-06T08:19:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742097_1273 (size=12325) 2024-12-06T08:19:37,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742098_1274 (size=12325) 2024-12-06T08:19:37,356 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1e37ad58156f430bb2411e290dbc46b0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1e37ad58156f430bb2411e290dbc46b0 2024-12-06T08:19:37,361 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 1e37ad58156f430bb2411e290dbc46b0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:37,361 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:37,361 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=12, startTime=1733473177315; duration=0sec 2024-12-06T08:19:37,361 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:37,361 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:37,361 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:37,364 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T08:19:37,366 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:37,366 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:37,366 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in 
TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:37,366 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d9023e2e80104dd6a2f577ee9c6b4c93, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fafae1f109784ed8b9ef64d557499757, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/1036540952144fa6a3acf41f1c02ab82, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/2efa7f55835a4bc8b7cb759bd42ef431] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=47.1 K 2024-12-06T08:19:37,375 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d9023e2e80104dd6a2f577ee9c6b4c93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473173828 2024-12-06T08:19:37,375 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fafae1f109784ed8b9ef64d557499757, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733473174975 2024-12-06T08:19:37,376 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1036540952144fa6a3acf41f1c02ab82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733473175076 2024-12-06T08:19:37,376 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2efa7f55835a4bc8b7cb759bd42ef431, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473175706 2024-12-06T08:19:37,403 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#227 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:37,405 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/4ad4584412494429befb59ea0040dc2a is 50, key is test_row_0/C:col10/1733473175706/Put/seqid=0 2024-12-06T08:19:37,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742099_1275 (size=12325) 2024-12-06T08:19:37,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:37,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:19:37,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:37,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:37,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:37,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:37,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:37,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:37,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/25839990540c453592bc9ac4942f229d is 50, key is test_row_0/A:col10/1733473177530/Put/seqid=0 2024-12-06T08:19:37,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:37,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473237549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:37,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742100_1276 (size=12151) 2024-12-06T08:19:37,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473237654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:37,754 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/d38b36100093481c971b971015756f07 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d38b36100093481c971b971015756f07 2024-12-06T08:19:37,760 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into d38b36100093481c971b971015756f07(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:37,760 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:37,760 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=12, startTime=1733473177315; duration=0sec 2024-12-06T08:19:37,760 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:37,760 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:37,854 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/4ad4584412494429befb59ea0040dc2a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/4ad4584412494429befb59ea0040dc2a 2024-12-06T08:19:37,859 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into 4ad4584412494429befb59ea0040dc2a(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:37,859 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:37,859 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=12, startTime=1733473177315; duration=0sec 2024-12-06T08:19:37,859 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:37,859 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:37,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473237858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:37,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/25839990540c453592bc9ac4942f229d 2024-12-06T08:19:37,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/5cf712284811486e89e1f411adf94ad2 is 50, key is test_row_0/B:col10/1733473177530/Put/seqid=0 2024-12-06T08:19:37,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742101_1277 (size=12151) 2024-12-06T08:19:37,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/5cf712284811486e89e1f411adf94ad2 2024-12-06T08:19:38,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ab53af954e5c4d879bfd5e55151c3b32 is 50, key is test_row_0/C:col10/1733473177530/Put/seqid=0 2024-12-06T08:19:38,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742102_1278 (size=12151) 2024-12-06T08:19:38,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ab53af954e5c4d879bfd5e55151c3b32 2024-12-06T08:19:38,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/25839990540c453592bc9ac4942f229d as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/25839990540c453592bc9ac4942f229d 2024-12-06T08:19:38,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/25839990540c453592bc9ac4942f229d, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T08:19:38,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/5cf712284811486e89e1f411adf94ad2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5cf712284811486e89e1f411adf94ad2 2024-12-06T08:19:38,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5cf712284811486e89e1f411adf94ad2, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T08:19:38,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ab53af954e5c4d879bfd5e55151c3b32 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab53af954e5c4d879bfd5e55151c3b32 2024-12-06T08:19:38,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab53af954e5c4d879bfd5e55151c3b32, entries=150, sequenceid=155, filesize=11.9 K 2024-12-06T08:19:38,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5310d6bf57bb8b709e1aec9222644a3d in 525ms, sequenceid=155, compaction requested=false 2024-12-06T08:19:38,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:38,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-06T08:19:38,060 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-06T08:19:38,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:38,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-06T08:19:38,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T08:19:38,066 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:38,067 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:38,067 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:38,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:19:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:38,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/cf22fbd8618c401ab979bd7955195d61 is 50, key is test_row_0/A:col10/1733473178107/Put/seqid=0 2024-12-06T08:19:38,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742103_1279 (size=12151) 2024-12-06T08:19:38,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/cf22fbd8618c401ab979bd7955195d61 2024-12-06T08:19:38,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2e0013a500fc4aa299e770784c8638df is 50, key is test_row_0/B:col10/1733473178107/Put/seqid=0 2024-12-06T08:19:38,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473238135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473238137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473238137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473238137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742104_1280 (size=12151) 2024-12-06T08:19:38,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473238163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T08:19:38,225 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T08:19:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473238240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473238240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473238241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473238241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T08:19:38,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T08:19:38,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:38,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473238443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473238443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473238443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473238444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,531 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T08:19:38,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:38,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2e0013a500fc4aa299e770784c8638df 2024-12-06T08:19:38,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/dbf124e1127f4750807677c1ff0ad843 is 50, key is test_row_0/C:col10/1733473178107/Put/seqid=0 2024-12-06T08:19:38,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742105_1281 (size=12151) 2024-12-06T08:19:38,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T08:19:38,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473238669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T08:19:38,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:38,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:38,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473238745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473238745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473238745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:38,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473238746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T08:19:38,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:38,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:38,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:38,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/dbf124e1127f4750807677c1ff0ad843 2024-12-06T08:19:38,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/cf22fbd8618c401ab979bd7955195d61 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/cf22fbd8618c401ab979bd7955195d61 2024-12-06T08:19:38,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/cf22fbd8618c401ab979bd7955195d61, entries=150, sequenceid=170, filesize=11.9 K 2024-12-06T08:19:38,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2e0013a500fc4aa299e770784c8638df as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df 2024-12-06T08:19:38,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df, entries=150, sequenceid=170, filesize=11.9 K 2024-12-06T08:19:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/dbf124e1127f4750807677c1ff0ad843 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/dbf124e1127f4750807677c1ff0ad843 2024-12-06T08:19:38,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/dbf124e1127f4750807677c1ff0ad843, entries=150, sequenceid=170, filesize=11.9 K 2024-12-06T08:19:38,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5310d6bf57bb8b709e1aec9222644a3d in 876ms, sequenceid=170, compaction requested=true 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:38,985 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:38,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:38,985 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:38,987 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:38,987 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:38,987 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:38,987 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d38b36100093481c971b971015756f07, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/25839990540c453592bc9ac4942f229d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/cf22fbd8618c401ab979bd7955195d61] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=35.8 K 2024-12-06T08:19:38,988 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:38,988 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:38,988 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d38b36100093481c971b971015756f07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473175706 2024-12-06T08:19:38,988 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:38,988 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1e37ad58156f430bb2411e290dbc46b0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5cf712284811486e89e1f411adf94ad2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=35.8 K 2024-12-06T08:19:38,988 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e37ad58156f430bb2411e290dbc46b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473175706 2024-12-06T08:19:38,988 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25839990540c453592bc9ac4942f229d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733473176395 2024-12-06T08:19:38,989 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cf712284811486e89e1f411adf94ad2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733473176395 2024-12-06T08:19:38,989 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf22fbd8618c401ab979bd7955195d61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473177545 2024-12-06T08:19:38,989 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e0013a500fc4aa299e770784c8638df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473177545 2024-12-06T08:19:38,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:38,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:38,992 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:38,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:38,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/8f581c53111e4d80b3d1e3adc8194966 is 50, key is test_row_0/A:col10/1733473178135/Put/seqid=0 2024-12-06T08:19:39,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742106_1282 (size=12151) 2024-12-06T08:19:39,014 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:39,015 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/742901966e414088922dda8a4aaa0b04 is 50, key is test_row_0/B:col10/1733473178107/Put/seqid=0 2024-12-06T08:19:39,021 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#236 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:39,022 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3855689007464a1eb0712f9a700fbd4d is 50, key is test_row_0/A:col10/1733473178107/Put/seqid=0 2024-12-06T08:19:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742107_1283 (size=12527) 2024-12-06T08:19:39,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742108_1284 (size=12527) 2024-12-06T08:19:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T08:19:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:39,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:39,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473239259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473239258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473239259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473239260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473239362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473239362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473239362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473239363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,411 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/8f581c53111e4d80b3d1e3adc8194966 2024-12-06T08:19:39,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/7e9e38d6758545b699e9f7bf13ad58c9 is 50, key is test_row_0/B:col10/1733473178135/Put/seqid=0 2024-12-06T08:19:39,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742109_1285 (size=12151) 2024-12-06T08:19:39,425 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/7e9e38d6758545b699e9f7bf13ad58c9 2024-12-06T08:19:39,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/5bb70f46bf11447d9fc32e2751de910d is 50, key is test_row_0/C:col10/1733473178135/Put/seqid=0 2024-12-06T08:19:39,437 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/742901966e414088922dda8a4aaa0b04 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/742901966e414088922dda8a4aaa0b04 2024-12-06T08:19:39,442 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 
742901966e414088922dda8a4aaa0b04(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:39,442 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:39,442 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473178985; duration=0sec 2024-12-06T08:19:39,442 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:39,443 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:39,443 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:39,444 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:39,444 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:39,444 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:39,444 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/4ad4584412494429befb59ea0040dc2a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab53af954e5c4d879bfd5e55151c3b32, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/dbf124e1127f4750807677c1ff0ad843] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=35.8 K 2024-12-06T08:19:39,444 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ad4584412494429befb59ea0040dc2a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733473175706 2024-12-06T08:19:39,445 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ab53af954e5c4d879bfd5e55151c3b32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733473176395 2024-12-06T08:19:39,445 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dbf124e1127f4750807677c1ff0ad843, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473177545 2024-12-06T08:19:39,454 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#239 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:39,455 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/72cfbba84fec4c099446a1a6dccf9eae is 50, key is test_row_0/C:col10/1733473178107/Put/seqid=0 2024-12-06T08:19:39,463 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3855689007464a1eb0712f9a700fbd4d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3855689007464a1eb0712f9a700fbd4d 2024-12-06T08:19:39,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742110_1286 (size=12151) 2024-12-06T08:19:39,470 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into 3855689007464a1eb0712f9a700fbd4d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
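
[Editor's sketch, not part of the log] The surrounding RegionTooBusyException entries show the region rejecting Mutate calls while its memstore is over the 512.0 K blocking limit, until the in-flight flush (pid=76) drains it. A minimal, hypothetical writer loop that backs off when a put is rejected for this reason might look like the following; the table name, column family A, qualifier col10 and row key test_row_0 are taken from the log, while the connection setup, retry count and backoff values are illustrative assumptions, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // While the region is over its blocking memstore size the server answers with
          // RegionTooBusyException; depending on client retry settings it may surface
          // directly or wrapped in a RetriesExhaustedWithDetailsException.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}
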
2024-12-06T08:19:39,470 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:39,470 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473178985; duration=0sec 2024-12-06T08:19:39,470 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:39,470 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:39,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742111_1287 (size=12527) 2024-12-06T08:19:39,484 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/72cfbba84fec4c099446a1a6dccf9eae as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/72cfbba84fec4c099446a1a6dccf9eae 2024-12-06T08:19:39,490 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into 72cfbba84fec4c099446a1a6dccf9eae(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:39,490 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:39,490 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473178985; duration=0sec 2024-12-06T08:19:39,490 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:39,490 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:39,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473239565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473239566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473239566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473239566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473239681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,865 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/5bb70f46bf11447d9fc32e2751de910d 2024-12-06T08:19:39,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473239868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473239869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473239869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473239870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:39,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/8f581c53111e4d80b3d1e3adc8194966 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/8f581c53111e4d80b3d1e3adc8194966 2024-12-06T08:19:39,882 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/8f581c53111e4d80b3d1e3adc8194966, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T08:19:39,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/7e9e38d6758545b699e9f7bf13ad58c9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/7e9e38d6758545b699e9f7bf13ad58c9 2024-12-06T08:19:39,888 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/7e9e38d6758545b699e9f7bf13ad58c9, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T08:19:39,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/5bb70f46bf11447d9fc32e2751de910d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/5bb70f46bf11447d9fc32e2751de910d 2024-12-06T08:19:39,893 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/5bb70f46bf11447d9fc32e2751de910d, entries=150, sequenceid=194, filesize=11.9 K 2024-12-06T08:19:39,894 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 5310d6bf57bb8b709e1aec9222644a3d in 902ms, sequenceid=194, compaction requested=false 2024-12-06T08:19:39,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:39,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:39,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-06T08:19:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-06T08:19:39,897 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-06T08:19:39,897 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8280 sec 2024-12-06T08:19:39,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.8350 sec 2024-12-06T08:19:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-06T08:19:40,170 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-06T08:19:40,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:40,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-06T08:19:40,173 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:40,173 INFO 
[PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:40,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:40,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T08:19:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T08:19:40,326 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-06T08:19:40,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:40,327 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:19:40,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:40,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:40,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:40,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:40,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:40,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:40,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/175181d415f74aeea999c3c803a95b2a is 50, key is test_row_1/A:col10/1733473179259/Put/seqid=0 2024-12-06T08:19:40,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742112_1288 (size=9757) 
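
[Editor's sketch, not part of the log] The flush of TestAcidGuarantees recorded here (procId 75 completed, then a new FlushTableProcedure pid=77 with subprocedure pid=78) is driven by a client-side flush request. A minimal sketch of how such a request is issued through the Admin API follows; the class name and connection setup are assumptions for illustration, only the table name comes from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush all regions of the table; in the log above this request
      // appears as a FlushTableProcedure (pid=77) with a FlushRegionProcedure
      // subprocedure (pid=78) for the region being tested.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
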
2024-12-06T08:19:40,339 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/175181d415f74aeea999c3c803a95b2a 2024-12-06T08:19:40,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f0816ac82cfe4ed7b0e485782c56843c is 50, key is test_row_1/B:col10/1733473179259/Put/seqid=0 2024-12-06T08:19:40,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742113_1289 (size=9757) 2024-12-06T08:19:40,353 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f0816ac82cfe4ed7b0e485782c56843c 2024-12-06T08:19:40,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/896eaa177ff34137a0ae2acacfda741b is 50, key is test_row_1/C:col10/1733473179259/Put/seqid=0 2024-12-06T08:19:40,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742114_1290 (size=9757) 2024-12-06T08:19:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:40,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:40,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473240388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473240389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473240389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473240390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T08:19:40,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473240491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473240492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473240492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473240493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473240695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473240695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473240695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473240697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:40,767 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/896eaa177ff34137a0ae2acacfda741b 2024-12-06T08:19:40,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/175181d415f74aeea999c3c803a95b2a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/175181d415f74aeea999c3c803a95b2a 2024-12-06T08:19:40,776 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/175181d415f74aeea999c3c803a95b2a, entries=100, sequenceid=210, filesize=9.5 K 2024-12-06T08:19:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T08:19:40,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f0816ac82cfe4ed7b0e485782c56843c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f0816ac82cfe4ed7b0e485782c56843c 2024-12-06T08:19:40,781 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f0816ac82cfe4ed7b0e485782c56843c, entries=100, sequenceid=210, filesize=9.5 K 2024-12-06T08:19:40,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/896eaa177ff34137a0ae2acacfda741b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/896eaa177ff34137a0ae2acacfda741b 2024-12-06T08:19:40,786 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/896eaa177ff34137a0ae2acacfda741b, entries=100, sequenceid=210, filesize=9.5 K 2024-12-06T08:19:40,787 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5310d6bf57bb8b709e1aec9222644a3d in 460ms, sequenceid=210, compaction requested=true 2024-12-06T08:19:40,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:40,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:40,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-06T08:19:40,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-06T08:19:40,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-06T08:19:40,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 615 msec 2024-12-06T08:19:40,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 625 msec 2024-12-06T08:19:41,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:41,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T08:19:41,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:41,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:41,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:41,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:41,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:41,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:41,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/d43fbb81c6db448a8ba6e5e6fad64b88 is 50, key is test_row_0/A:col10/1733473180385/Put/seqid=0 2024-12-06T08:19:41,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473241008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473241009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473241010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473241010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742115_1291 (size=12151) 2024-12-06T08:19:41,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/d43fbb81c6db448a8ba6e5e6fad64b88 2024-12-06T08:19:41,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/92c43084adca448c982bf2b45f96e9a1 is 50, key is test_row_0/B:col10/1733473180385/Put/seqid=0 2024-12-06T08:19:41,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742116_1292 (size=12151) 2024-12-06T08:19:41,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473241112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473241112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473241113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473241113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-06T08:19:41,278 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-06T08:19:41,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:41,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-06T08:19:41,281 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:41,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T08:19:41,281 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:41,282 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:41,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473241314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473241315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473241316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473241316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T08:19:41,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T08:19:41,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:41,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/92c43084adca448c982bf2b45f96e9a1 2024-12-06T08:19:41,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/7d8fb85603b44f58b4aea71354314e59 is 50, key is test_row_0/C:col10/1733473180385/Put/seqid=0 2024-12-06T08:19:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742117_1293 (size=12151) 2024-12-06T08:19:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T08:19:41,586 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T08:19:41,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:41,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473241616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473241619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473241620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473241621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:41,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473241687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,689 DEBUG [Thread-1143 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:41,740 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T08:19:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:41,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
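[annotation] The stack traces above all originate in HRegion.checkResources: once the region's memstore passes its blocking limit (512.0 K here, which is typically the configured flush size multiplied by hbase.hregion.memstore.block.multiplier), every incoming Mutate is rejected with RegionTooBusyException until a flush drains the memstore, and the client-side RpcRetryingCallerImpl entry (tries=6, retries=16) shows the writer simply retrying. A minimal standalone sketch of that back-pressure check, with simplified names rather than the real HRegion internals, could look like this:

import java.util.concurrent.atomic.AtomicLong;

// Simplified sketch of the memstore back-pressure check that produces
// "RegionTooBusyException: Over memstore limit=..." in the log above.
// Names and structure are illustrative, not the actual HRegion code.
public class MemStoreBackPressureSketch {

    static class RegionTooBusyException extends java.io.IOException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    private final AtomicLong memStoreDataSize = new AtomicLong();
    private final long flushSize;        // cf. hbase.hregion.memstore.flush.size
    private final long blockMultiplier;  // cf. hbase.hregion.memstore.block.multiplier
    private final String regionName;

    MemStoreBackPressureSketch(long flushSize, long blockMultiplier, String regionName) {
        this.flushSize = flushSize;
        this.blockMultiplier = blockMultiplier;
        this.regionName = regionName;
    }

    // Called before every mutation is applied, mirroring HRegion.checkResources.
    void checkResources() throws RegionTooBusyException {
        long blockingLimit = flushSize * blockMultiplier;
        if (memStoreDataSize.get() > blockingLimit) {
            requestFlush();  // ask the flusher to drain the memstore
            throw new RegionTooBusyException(
                "Over memstore limit=" + blockingLimit + ", regionName=" + regionName);
        }
    }

    void put(long mutationSize) throws RegionTooBusyException {
        checkResources();
        memStoreDataSize.addAndGet(mutationSize);  // accepted into the memstore
    }

    private void requestFlush() {
        // In the real server this queues the region on the MemStoreFlusher;
        // here it is just a placeholder.
    }

    public static void main(String[] args) {
        // 128 K flush size x 4 gives the 512 K blocking limit seen in the log.
        MemStoreBackPressureSketch region =
            new MemStoreBackPressureSketch(128 * 1024, 4, "5310d6bf57bb8b709e1aec9222644a3d");
        try {
            for (int i = 0; i < 20; i++) {
                region.put(50 * 1024);  // eventually trips the blocking limit
            }
        } catch (RegionTooBusyException e) {
            System.out.println(e.getMessage());
        }
    }
}

The flush that eventually clears the condition is the "Finished flush of dataSize ~147.60 KB" entry a little further down.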
2024-12-06T08:19:41,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/7d8fb85603b44f58b4aea71354314e59 2024-12-06T08:19:41,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/d43fbb81c6db448a8ba6e5e6fad64b88 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d43fbb81c6db448a8ba6e5e6fad64b88 2024-12-06T08:19:41,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d43fbb81c6db448a8ba6e5e6fad64b88, entries=150, sequenceid=235, filesize=11.9 K 2024-12-06T08:19:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/92c43084adca448c982bf2b45f96e9a1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/92c43084adca448c982bf2b45f96e9a1 2024-12-06T08:19:41,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/92c43084adca448c982bf2b45f96e9a1, entries=150, sequenceid=235, filesize=11.9 K 2024-12-06T08:19:41,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/7d8fb85603b44f58b4aea71354314e59 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/7d8fb85603b44f58b4aea71354314e59 2024-12-06T08:19:41,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/7d8fb85603b44f58b4aea71354314e59, entries=150, sequenceid=235, filesize=11.9 K 2024-12-06T08:19:41,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5310d6bf57bb8b709e1aec9222644a3d in 881ms, sequenceid=235, compaction requested=true 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:41,882 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:41,882 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:41,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:41,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T08:19:41,884 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:41,884 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:41,884 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:41,884 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:41,884 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,884 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
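[annotation] After the flush, the flusher marks each store for compaction, and the policy entries above show four store files being picked ("Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations"). A rough standalone sketch of that kind of ratio-based window selection, a simplification and not the actual ExploringCompactionPolicy, could be:

import java.util.ArrayList;
import java.util.List;

// Rough sketch of ratio-based compaction selection: examine contiguous
// windows of store files, keep only windows where no file dwarfs the rest,
// and prefer the candidate that compacts the most files for the fewest bytes.
public class CompactionSelectionSketch {

    static List<Long> selectFiles(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            int maxEnd = Math.min(fileSizes.size(), start + maxFiles);
            for (int end = start + minFiles; end <= maxEnd; end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (!withinRatio(window, ratio)) {
                    continue;
                }
                long total = window.stream().mapToLong(Long::longValue).sum();
                boolean better = window.size() > best.size()
                    || (window.size() == best.size() && total < bestTotal);
                if (better) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    // Every file must be no larger than ratio times the sum of the others.
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes in the same ballpark as the four files selected in the log.
        List<Long> sizes = List.of(12_493L, 12_151L, 9_791L, 12_151L);
        System.out.println(selectFiles(sizes, 2, 10, 1.2));
        // -> all four files, matching "selected 4 files" in the log
    }
}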
2024-12-06T08:19:41,884 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/742901966e414088922dda8a4aaa0b04, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/7e9e38d6758545b699e9f7bf13ad58c9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f0816ac82cfe4ed7b0e485782c56843c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/92c43084adca448c982bf2b45f96e9a1] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=45.5 K 2024-12-06T08:19:41,885 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3855689007464a1eb0712f9a700fbd4d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/8f581c53111e4d80b3d1e3adc8194966, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/175181d415f74aeea999c3c803a95b2a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d43fbb81c6db448a8ba6e5e6fad64b88] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=45.5 K 2024-12-06T08:19:41,885 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3855689007464a1eb0712f9a700fbd4d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473177545 2024-12-06T08:19:41,885 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 742901966e414088922dda8a4aaa0b04, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473177545 2024-12-06T08:19:41,886 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f581c53111e4d80b3d1e3adc8194966, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733473178134 2024-12-06T08:19:41,886 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e9e38d6758545b699e9f7bf13ad58c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733473178134 2024-12-06T08:19:41,886 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 175181d415f74aeea999c3c803a95b2a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733473179258 2024-12-06T08:19:41,886 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 
f0816ac82cfe4ed7b0e485782c56843c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733473179258 2024-12-06T08:19:41,886 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d43fbb81c6db448a8ba6e5e6fad64b88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733473180385 2024-12-06T08:19:41,886 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 92c43084adca448c982bf2b45f96e9a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733473180385 2024-12-06T08:19:41,893 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:41,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-06T08:19:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:41,894 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:41,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:41,896 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:41,897 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/dc6a495da7da4117a16e7a8db0938a54 is 50, key is test_row_0/B:col10/1733473180385/Put/seqid=0 2024-12-06T08:19:41,897 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#247 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:41,898 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9b7e06efef45480eb898b5ce11d589e0 is 50, key is test_row_0/A:col10/1733473180385/Put/seqid=0 2024-12-06T08:19:41,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/5d78125c9f7a46d59eb9048a2ee5ddda is 50, key is test_row_0/A:col10/1733473181009/Put/seqid=0 2024-12-06T08:19:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742118_1294 (size=12663) 2024-12-06T08:19:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742119_1295 (size=12663) 2024-12-06T08:19:41,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742120_1296 (size=12151) 2024-12-06T08:19:41,917 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/5d78125c9f7a46d59eb9048a2ee5ddda 2024-12-06T08:19:41,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/ee725fa5048247f3a0ff16b90436f678 is 50, key is test_row_0/B:col10/1733473181009/Put/seqid=0 2024-12-06T08:19:41,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742121_1297 (size=12151) 2024-12-06T08:19:41,935 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/ee725fa5048247f3a0ff16b90436f678 
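[annotation] The flush output above is first written under the region's .tmp directory (the "Flushed memstore data size=17.89 KB ... to=.../.tmp/A/..." lines) and only later moved into the column-family directory by the HRegionFileSystem "Committing ... as ..." entries further down, so readers never see a half-written store file. A minimal sketch of that write-to-temp-then-rename pattern using the plain Hadoop FileSystem API, with illustrative paths and contents, is:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the flush commit pattern visible in the log: write the new
// store file under the region's .tmp directory, then rename it into the
// column-family directory. Paths and file contents here are illustrative.
public class FlushCommitSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/TestTable/region1/.tmp/A/newfile");
        Path storeFile = new Path("/data/default/TestTable/region1/A/newfile");

        // 1. Write the flushed cells to the temporary location.
        try (OutputStream out = fs.create(tmpFile, true)) {
            out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Publish the file by renaming it into the store directory.
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}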
2024-12-06T08:19:41,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/560ca0d1053f4c14a315810c9e41c834 is 50, key is test_row_0/C:col10/1733473181009/Put/seqid=0 2024-12-06T08:19:41,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742122_1298 (size=12151) 2024-12-06T08:19:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:42,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:42,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473242170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473242173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473242174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473242175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473242276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473242278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473242279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473242279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,316 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/dc6a495da7da4117a16e7a8db0938a54 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/dc6a495da7da4117a16e7a8db0938a54 2024-12-06T08:19:42,321 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9b7e06efef45480eb898b5ce11d589e0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9b7e06efef45480eb898b5ce11d589e0 2024-12-06T08:19:42,323 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into dc6a495da7da4117a16e7a8db0938a54(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
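[annotation] Both compactions in this stretch run under the PressureAwareThroughputController, which reports their average throughput against a total limit of 50.00 MB/second and sleeps writers that exceed their share ("slept 0 time(s) and total slept time is 0 ms" here, since the files are tiny). A bare-bones sketch of that kind of byte-rate throttle, illustrative only and not the actual controller, is:

// Bare-bones byte-rate throttle in the spirit of the throughput controller
// entries in the log: track bytes written and sleep when the observed rate
// exceeds the configured limit.
public class ThroughputThrottleSketch {

    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesSinceStart;

    ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Called after each chunk of compaction output is written.
    void control(long bytesWritten) throws InterruptedException {
        bytesSinceStart += bytesWritten;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double expectedSeconds = bytesSinceStart / maxBytesPerSecond;
        long sleepMillis = (long) ((expectedSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
            Thread.sleep(sleepMillis);  // the "slept N time(s)" reported in the log
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            throttle.control(64 * 1024);  // pretend we wrote a 64 KB compacted block
        }
        System.out.println("done");
    }
}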
2024-12-06T08:19:42,323 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:42,323 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=12, startTime=1733473181882; duration=0sec 2024-12-06T08:19:42,323 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:42,323 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:42,323 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:19:42,325 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:19:42,325 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:42,325 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,325 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/72cfbba84fec4c099446a1a6dccf9eae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/5bb70f46bf11447d9fc32e2751de910d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/896eaa177ff34137a0ae2acacfda741b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/7d8fb85603b44f58b4aea71354314e59] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=45.5 K 2024-12-06T08:19:42,326 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 72cfbba84fec4c099446a1a6dccf9eae, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473177545 2024-12-06T08:19:42,326 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into 9b7e06efef45480eb898b5ce11d589e0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
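[annotation] On the other side of the RegionTooBusyException rejections is the test's writer: the earlier RpcRetryingCallerImpl entry (tries=6, retries=16, started=4140 ms ago) shows the AcidGuaranteesTestTool put to row 'test_row_1' on TestAcidGuarantees being retried with backoff until the flush catches up. A short sketch of such a writer using the standard HBase client API, with the retry settings spelled out explicitly (the values are illustrative), is:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer like the AcidGuaranteesTestTool threads in the log:
// a plain Table.put, with client retry settings so that transient
// RegionTooBusyException rejections are retried with backoff inside the
// client rather than surfaced to the caller.
public class RetryingWriterSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16);  // retry budget, cf. retries=16 in the log
        conf.setLong("hbase.client.pause", 100);         // base backoff in ms

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // RpcRetryingCallerImpl inside the client handles the retry loop;
            // RegionTooBusyException only propagates once retries are exhausted.
            table.put(put);
        }
    }
}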
2024-12-06T08:19:42,326 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:42,326 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=12, startTime=1733473181882; duration=0sec 2024-12-06T08:19:42,326 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:42,326 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:42,326 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bb70f46bf11447d9fc32e2751de910d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733473178134 2024-12-06T08:19:42,326 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 896eaa177ff34137a0ae2acacfda741b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733473179258 2024-12-06T08:19:42,327 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d8fb85603b44f58b4aea71354314e59, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733473180385 2024-12-06T08:19:42,336 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:42,337 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/92f8fcfaee3342e481efa63c00274c4b is 50, key is test_row_0/C:col10/1733473180385/Put/seqid=0 2024-12-06T08:19:42,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742123_1299 (size=12663) 2024-12-06T08:19:42,356 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/560ca0d1053f4c14a315810c9e41c834 2024-12-06T08:19:42,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/5d78125c9f7a46d59eb9048a2ee5ddda as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/5d78125c9f7a46d59eb9048a2ee5ddda 2024-12-06T08:19:42,366 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/5d78125c9f7a46d59eb9048a2ee5ddda, entries=150, sequenceid=246, filesize=11.9 K 2024-12-06T08:19:42,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/ee725fa5048247f3a0ff16b90436f678 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ee725fa5048247f3a0ff16b90436f678 2024-12-06T08:19:42,372 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ee725fa5048247f3a0ff16b90436f678, entries=150, sequenceid=246, filesize=11.9 K 2024-12-06T08:19:42,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/560ca0d1053f4c14a315810c9e41c834 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/560ca0d1053f4c14a315810c9e41c834 2024-12-06T08:19:42,377 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 
{event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/560ca0d1053f4c14a315810c9e41c834, entries=150, sequenceid=246, filesize=11.9 K 2024-12-06T08:19:42,378 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5310d6bf57bb8b709e1aec9222644a3d in 484ms, sequenceid=246, compaction requested=false 2024-12-06T08:19:42,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:42,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-06T08:19:42,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-06T08:19:42,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-06T08:19:42,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0990 sec 2024-12-06T08:19:42,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.1030 sec 2024-12-06T08:19:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-06T08:19:42,385 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-06T08:19:42,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:42,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-06T08:19:42,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T08:19:42,387 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:42,387 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:42,388 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:42,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T08:19:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:42,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/36a30f4d00b64ca7a645f47d332ca028 is 50, key is test_row_0/A:col10/1733473182480/Put/seqid=0 2024-12-06T08:19:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T08:19:42,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473242486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473242487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473242488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473242489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742124_1300 (size=12301) 2024-12-06T08:19:42,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/36a30f4d00b64ca7a645f47d332ca028 2024-12-06T08:19:42,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/cedb11d11ff044fd857b3e2401f8b003 is 50, key is test_row_0/B:col10/1733473182480/Put/seqid=0 2024-12-06T08:19:42,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742125_1301 (size=12301) 2024-12-06T08:19:42,539 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-06T08:19:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:42,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473242591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473242591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473242591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473242591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T08:19:42,692 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-06T08:19:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:42,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,761 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/92f8fcfaee3342e481efa63c00274c4b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/92f8fcfaee3342e481efa63c00274c4b 2024-12-06T08:19:42,767 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into 92f8fcfaee3342e481efa63c00274c4b(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:42,767 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:42,767 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=12, startTime=1733473181882; duration=0sec 2024-12-06T08:19:42,767 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:42,768 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:42,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473242794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473242794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473242795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473242795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,845 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:42,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-06T08:19:42,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:42,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:42,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:42,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/cedb11d11ff044fd857b3e2401f8b003 2024-12-06T08:19:42,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/86548843db444c9eb4186321f2300155 is 50, key is test_row_0/C:col10/1733473182480/Put/seqid=0 2024-12-06T08:19:42,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742126_1302 (size=12301) 2024-12-06T08:19:42,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/86548843db444c9eb4186321f2300155 2024-12-06T08:19:42,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/36a30f4d00b64ca7a645f47d332ca028 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/36a30f4d00b64ca7a645f47d332ca028 2024-12-06T08:19:42,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/36a30f4d00b64ca7a645f47d332ca028, entries=150, sequenceid=274, filesize=12.0 K 2024-12-06T08:19:42,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/cedb11d11ff044fd857b3e2401f8b003 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/cedb11d11ff044fd857b3e2401f8b003 2024-12-06T08:19:42,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/cedb11d11ff044fd857b3e2401f8b003, entries=150, sequenceid=274, filesize=12.0 K 2024-12-06T08:19:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/86548843db444c9eb4186321f2300155 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/86548843db444c9eb4186321f2300155 2024-12-06T08:19:42,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/86548843db444c9eb4186321f2300155, entries=150, sequenceid=274, filesize=12.0 K 2024-12-06T08:19:42,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 5310d6bf57bb8b709e1aec9222644a3d in 491ms, sequenceid=274, compaction requested=true 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:42,972 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:42,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:42,972 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:42,973 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:42,973 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:42,973 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:42,973 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:42,973 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:42,974 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:42,974 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9b7e06efef45480eb898b5ce11d589e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/5d78125c9f7a46d59eb9048a2ee5ddda, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/36a30f4d00b64ca7a645f47d332ca028] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.2 K 2024-12-06T08:19:42,974 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/dc6a495da7da4117a16e7a8db0938a54, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ee725fa5048247f3a0ff16b90436f678, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/cedb11d11ff044fd857b3e2401f8b003] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.2 K 2024-12-06T08:19:42,974 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b7e06efef45480eb898b5ce11d589e0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733473180385 2024-12-06T08:19:42,974 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dc6a495da7da4117a16e7a8db0938a54, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733473180385 2024-12-06T08:19:42,975 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d78125c9f7a46d59eb9048a2ee5ddda, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733473181003 2024-12-06T08:19:42,975 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ee725fa5048247f3a0ff16b90436f678, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733473181003 2024-12-06T08:19:42,975 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36a30f4d00b64ca7a645f47d332ca028, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733473182172 2024-12-06T08:19:42,975 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] 
compactions.Compactor(224): Compacting cedb11d11ff044fd857b3e2401f8b003, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733473182172 2024-12-06T08:19:42,988 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#255 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:42,988 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/7c1a0a0179c341f6945ec87d722bc12e is 50, key is test_row_0/A:col10/1733473182480/Put/seqid=0 2024-12-06T08:19:42,989 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:42,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T08:19:42,990 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d3d5534abf5b4d5bb68155eef1d1461a is 50, key is test_row_0/B:col10/1733473182480/Put/seqid=0 2024-12-06T08:19:42,999 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-06T08:19:43,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:43,000 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-06T08:19:43,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:43,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:43,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:43,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:43,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:43,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:43,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742127_1303 (size=12915) 2024-12-06T08:19:43,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/52ec04e1f77242b29e63c6972f91cafd is 50, key is test_row_1/A:col10/1733473182488/Put/seqid=0 2024-12-06T08:19:43,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742128_1304 (size=12915) 2024-12-06T08:19:43,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742129_1305 (size=9857) 2024-12-06T08:19:43,024 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/52ec04e1f77242b29e63c6972f91cafd 2024-12-06T08:19:43,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/9aaaebbec4e6444f89a9a7f82a37261f is 50, key is test_row_1/B:col10/1733473182488/Put/seqid=0 2024-12-06T08:19:43,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742130_1306 (size=9857) 2024-12-06T08:19:43,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:43,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:43,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473243116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473243118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473243118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473243119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473243220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473243221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473243222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473243222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473243426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473243426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473243426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473243427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,427 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d3d5534abf5b4d5bb68155eef1d1461a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d3d5534abf5b4d5bb68155eef1d1461a 2024-12-06T08:19:43,430 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/7c1a0a0179c341f6945ec87d722bc12e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/7c1a0a0179c341f6945ec87d722bc12e 2024-12-06T08:19:43,433 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into d3d5534abf5b4d5bb68155eef1d1461a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:43,433 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:43,433 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473182972; duration=0sec 2024-12-06T08:19:43,433 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:43,433 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:43,433 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:43,435 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:43,435 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:43,435 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:43,435 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/92f8fcfaee3342e481efa63c00274c4b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/560ca0d1053f4c14a315810c9e41c834, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/86548843db444c9eb4186321f2300155] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.2 K 2024-12-06T08:19:43,435 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 92f8fcfaee3342e481efa63c00274c4b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733473180385 2024-12-06T08:19:43,435 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into 7c1a0a0179c341f6945ec87d722bc12e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:43,435 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:43,435 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473182972; duration=0sec 2024-12-06T08:19:43,435 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:43,435 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:43,436 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 560ca0d1053f4c14a315810c9e41c834, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733473181003 2024-12-06T08:19:43,436 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 86548843db444c9eb4186321f2300155, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733473182172 2024-12-06T08:19:43,442 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/9aaaebbec4e6444f89a9a7f82a37261f 2024-12-06T08:19:43,445 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:43,446 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c22d3a6a1c34493bb777aef0301cc343 is 50, key is test_row_0/C:col10/1733473182480/Put/seqid=0 2024-12-06T08:19:43,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/28f65f363151429e85e2238dc07668e4 is 50, key is test_row_1/C:col10/1733473182488/Put/seqid=0 2024-12-06T08:19:43,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742131_1307 (size=12915) 2024-12-06T08:19:43,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742132_1308 (size=9857) 2024-12-06T08:19:43,468 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/28f65f363151429e85e2238dc07668e4 2024-12-06T08:19:43,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/52ec04e1f77242b29e63c6972f91cafd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/52ec04e1f77242b29e63c6972f91cafd 2024-12-06T08:19:43,479 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/52ec04e1f77242b29e63c6972f91cafd, entries=100, sequenceid=285, filesize=9.6 K 2024-12-06T08:19:43,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/9aaaebbec4e6444f89a9a7f82a37261f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/9aaaebbec4e6444f89a9a7f82a37261f 2024-12-06T08:19:43,486 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/9aaaebbec4e6444f89a9a7f82a37261f, entries=100, sequenceid=285, filesize=9.6 K 2024-12-06T08:19:43,487 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/28f65f363151429e85e2238dc07668e4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/28f65f363151429e85e2238dc07668e4 2024-12-06T08:19:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T08:19:43,492 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/28f65f363151429e85e2238dc07668e4, entries=100, sequenceid=285, filesize=9.6 K 2024-12-06T08:19:43,493 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 5310d6bf57bb8b709e1aec9222644a3d in 493ms, sequenceid=285, compaction requested=false 2024-12-06T08:19:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-06T08:19:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-06T08:19:43,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-06T08:19:43,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1070 sec 2024-12-06T08:19:43,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.1150 sec 2024-12-06T08:19:43,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:43,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-06T08:19:43,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:43,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:43,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:43,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:43,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:43,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:43,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473243764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473243764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473243764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473243765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/1ae0dd22d45d42588194b29c14fdb74c is 50, key is test_row_0/A:col10/1733473183115/Put/seqid=0 2024-12-06T08:19:43,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742133_1309 (size=12301) 2024-12-06T08:19:43,863 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c22d3a6a1c34493bb777aef0301cc343 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c22d3a6a1c34493bb777aef0301cc343 2024-12-06T08:19:43,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,868 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into c22d3a6a1c34493bb777aef0301cc343(size=12.6 K), total size for store is 22.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:43,868 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:43,868 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473182972; duration=0sec 2024-12-06T08:19:43,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473243868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,869 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:43,869 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:43,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473243868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473243868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:43,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:43,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473243869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473244071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473244071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473244071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473244071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/1ae0dd22d45d42588194b29c14fdb74c 2024-12-06T08:19:44,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/ff458ff9e3f94733955b7839c2d7547f is 50, key is test_row_0/B:col10/1733473183115/Put/seqid=0 2024-12-06T08:19:44,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742134_1310 (size=12301) 2024-12-06T08:19:44,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473244374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473244373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473244374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473244374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-06T08:19:44,491 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-06T08:19:44,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:44,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-06T08:19:44,494 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:44,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T08:19:44,495 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:44,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:44,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/ff458ff9e3f94733955b7839c2d7547f 2024-12-06T08:19:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T08:19:44,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/3ff713d530994686a27151ba51b8886b is 50, key is test_row_0/C:col10/1733473183115/Put/seqid=0 2024-12-06T08:19:44,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742135_1311 (size=12301) 2024-12-06T08:19:44,646 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T08:19:44,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:44,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:44,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:44,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,799 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T08:19:44,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:44,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:44,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:44,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T08:19:44,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473244882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473244882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473244882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473244884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,956 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:44,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T08:19:44,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:44,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:44,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:44,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:44,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:44,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:45,006 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/3ff713d530994686a27151ba51b8886b 2024-12-06T08:19:45,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/1ae0dd22d45d42588194b29c14fdb74c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/1ae0dd22d45d42588194b29c14fdb74c 2024-12-06T08:19:45,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/1ae0dd22d45d42588194b29c14fdb74c, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T08:19:45,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/ff458ff9e3f94733955b7839c2d7547f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ff458ff9e3f94733955b7839c2d7547f 2024-12-06T08:19:45,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ff458ff9e3f94733955b7839c2d7547f, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T08:19:45,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/3ff713d530994686a27151ba51b8886b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/3ff713d530994686a27151ba51b8886b 2024-12-06T08:19:45,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/3ff713d530994686a27151ba51b8886b, entries=150, sequenceid=316, filesize=12.0 K 2024-12-06T08:19:45,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 5310d6bf57bb8b709e1aec9222644a3d in 1294ms, sequenceid=316, compaction requested=true 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:45,025 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:45,025 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:45,026 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35073 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:45,026 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35073 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:45,026 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:45,026 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:45,026 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:45,026 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:45,026 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/7c1a0a0179c341f6945ec87d722bc12e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/52ec04e1f77242b29e63c6972f91cafd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/1ae0dd22d45d42588194b29c14fdb74c] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.3 K 2024-12-06T08:19:45,026 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d3d5534abf5b4d5bb68155eef1d1461a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/9aaaebbec4e6444f89a9a7f82a37261f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ff458ff9e3f94733955b7839c2d7547f] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.3 K 2024-12-06T08:19:45,027 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d3d5534abf5b4d5bb68155eef1d1461a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733473182172 2024-12-06T08:19:45,027 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c1a0a0179c341f6945ec87d722bc12e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733473182172 2024-12-06T08:19:45,027 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52ec04e1f77242b29e63c6972f91cafd, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733473182487 2024-12-06T08:19:45,027 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aaaebbec4e6444f89a9a7f82a37261f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733473182487 2024-12-06T08:19:45,028 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ae0dd22d45d42588194b29c14fdb74c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473183115 2024-12-06T08:19:45,028 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ff458ff9e3f94733955b7839c2d7547f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473183115 2024-12-06T08:19:45,036 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#264 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:45,036 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/c15d6eb3616b4f5c91076815ddb7ffed is 50, key is test_row_0/A:col10/1733473183115/Put/seqid=0 2024-12-06T08:19:45,037 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#265 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:45,037 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/3f79ecdd82d94314b87ec8ad7fcf61ac is 50, key is test_row_0/B:col10/1733473183115/Put/seqid=0 2024-12-06T08:19:45,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742136_1312 (size=13017) 2024-12-06T08:19:45,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742137_1313 (size=13017) 2024-12-06T08:19:45,048 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/3f79ecdd82d94314b87ec8ad7fcf61ac as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/3f79ecdd82d94314b87ec8ad7fcf61ac 2024-12-06T08:19:45,060 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 3f79ecdd82d94314b87ec8ad7fcf61ac(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:45,060 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:45,060 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473185025; duration=0sec 2024-12-06T08:19:45,060 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:45,060 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:45,060 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:45,061 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35073 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:45,062 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:45,062 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:45,062 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c22d3a6a1c34493bb777aef0301cc343, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/28f65f363151429e85e2238dc07668e4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/3ff713d530994686a27151ba51b8886b] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.3 K 2024-12-06T08:19:45,062 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c22d3a6a1c34493bb777aef0301cc343, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733473182172 2024-12-06T08:19:45,063 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 28f65f363151429e85e2238dc07668e4, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733473182487 2024-12-06T08:19:45,063 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ff713d530994686a27151ba51b8886b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473183115 2024-12-06T08:19:45,071 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5310d6bf57bb8b709e1aec9222644a3d#C#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:45,072 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ab053350fafb48b883ff61e9315590a5 is 50, key is test_row_0/C:col10/1733473183115/Put/seqid=0 2024-12-06T08:19:45,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742138_1314 (size=13017) 2024-12-06T08:19:45,083 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ab053350fafb48b883ff61e9315590a5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab053350fafb48b883ff61e9315590a5 2024-12-06T08:19:45,089 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into ab053350fafb48b883ff61e9315590a5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:45,089 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:45,089 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473185025; duration=0sec 2024-12-06T08:19:45,089 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:45,089 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:45,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T08:19:45,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-06T08:19:45,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:45,110 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-06T08:19:45,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:45,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:45,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:45,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:45,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:45,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:45,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/f8bf69ce03084a799be6b6c0c4644c7b is 50, key is test_row_0/A:col10/1733473183736/Put/seqid=0 2024-12-06T08:19:45,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742139_1315 (size=12301) 2024-12-06T08:19:45,450 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/c15d6eb3616b4f5c91076815ddb7ffed as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c15d6eb3616b4f5c91076815ddb7ffed 2024-12-06T08:19:45,463 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into c15d6eb3616b4f5c91076815ddb7ffed(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
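The ExploringCompactionPolicy lines above ("selected 3 files of size 35073 ... with 1 in ratio") refer to the ratio test HBase applies when picking store files for a minor compaction. The snippet below is a simplified, self-contained illustration of that criterion only, not the actual ExploringCompactionPolicy code: a file stays eligible when it is no larger than the sum of the other candidates times the compaction ratio (the 1.2 default used here is an assumption; check hbase.hstore.compaction.ratio in your configuration).

```java
import java.util.List;

// Simplified illustration of the ratio check behind the "in ratio" log lines above.
final class RatioCheck {
  /** True when every candidate file passes the ratio test. */
  static boolean selectionWithinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // A file is acceptable when size <= ratio * (total size of the other candidates).
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly matching the logged candidates (12.6 K, 9.6 K, 12.0 K), in bytes.
    List<Long> sizes = List.of(12_900L, 9_830L, 12_288L);
    System.out.println(selectionWithinRatio(sizes, 1.2));   // assumed default ratio
  }
}
```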
2024-12-06T08:19:45,463 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:45,463 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473185025; duration=0sec 2024-12-06T08:19:45,463 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:45,463 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:45,552 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/f8bf69ce03084a799be6b6c0c4644c7b 2024-12-06T08:19:45,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2c42842cee7b4622b214a098005338e3 is 50, key is test_row_0/B:col10/1733473183736/Put/seqid=0 2024-12-06T08:19:45,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742140_1316 (size=12301) 2024-12-06T08:19:45,566 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2c42842cee7b4622b214a098005338e3 2024-12-06T08:19:45,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/55ccef09352f4b0680634f00515e690e is 50, key is test_row_0/C:col10/1733473183736/Put/seqid=0 2024-12-06T08:19:45,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742141_1317 (size=12301) 2024-12-06T08:19:45,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T08:19:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:45,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:45,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:45,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473245771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473245874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473245885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:45,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473245888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:45,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473245889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:45,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473245891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:45,990 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/55ccef09352f4b0680634f00515e690e 2024-12-06T08:19:45,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/f8bf69ce03084a799be6b6c0c4644c7b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f8bf69ce03084a799be6b6c0c4644c7b 2024-12-06T08:19:46,006 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f8bf69ce03084a799be6b6c0c4644c7b, entries=150, sequenceid=326, filesize=12.0 K 2024-12-06T08:19:46,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2c42842cee7b4622b214a098005338e3 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2c42842cee7b4622b214a098005338e3 2024-12-06T08:19:46,011 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2c42842cee7b4622b214a098005338e3, entries=150, sequenceid=326, filesize=12.0 K 2024-12-06T08:19:46,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/55ccef09352f4b0680634f00515e690e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/55ccef09352f4b0680634f00515e690e 2024-12-06T08:19:46,017 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/55ccef09352f4b0680634f00515e690e, entries=150, sequenceid=326, filesize=12.0 K 2024-12-06T08:19:46,018 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 5310d6bf57bb8b709e1aec9222644a3d in 909ms, sequenceid=326, compaction requested=false 2024-12-06T08:19:46,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:46,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
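The repeated WARN/DEBUG pairs interleaved above and below show Mutate RPCs being rejected with RegionTooBusyException while the region's memstore sits over the 512 K limit used by this test. A minimal client-side sketch of tolerating that condition follows; it assumes the exception reaches the caller, and the table, row, family, and qualifier names are taken from the log purely for illustration. In practice the stock client already retries according to hbase.client.retries.number and hbase.client.pause, so an explicit loop like this is rarely needed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: retry a put when the region reports it is too busy (memstore over limit).
public final class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                       // write accepted
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;            // give up after a few tries
          Thread.sleep(100L * attempt);         // crude linear backoff
        }
      }
    }
  }
}
```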
2024-12-06T08:19:46,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-06T08:19:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-06T08:19:46,022 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-06T08:19:46,022 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5250 sec 2024-12-06T08:19:46,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.5290 sec 2024-12-06T08:19:46,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:46,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-06T08:19:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:46,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:46,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/77c78d355a314b1181a2b1254c065cce is 50, key is test_row_0/A:col10/1733473185763/Put/seqid=0 2024-12-06T08:19:46,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473246088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:46,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742142_1318 (size=12301) 2024-12-06T08:19:46,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/77c78d355a314b1181a2b1254c065cce 2024-12-06T08:19:46,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d115dae5eee242d0adf6c85a08169126 is 50, key is test_row_0/B:col10/1733473185763/Put/seqid=0 2024-12-06T08:19:46,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742143_1319 (size=12301) 2024-12-06T08:19:46,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473246191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:46,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:46,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473246394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:46,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d115dae5eee242d0adf6c85a08169126 2024-12-06T08:19:46,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/9b773bab7a214efa8de6310135a30a52 is 50, key is test_row_0/C:col10/1733473185763/Put/seqid=0 2024-12-06T08:19:46,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742144_1320 (size=12301) 2024-12-06T08:19:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-06T08:19:46,607 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-06T08:19:46,611 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-06T08:19:46,614 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T08:19:46,620 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:46,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
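The master-side entries above show flush procedure 83 completing and the jenkins client immediately requesting another flush of TestAcidGuarantees (procedure 85, with subprocedure 86). A minimal sketch of issuing that same request from client code is below; only the table name comes from the log, the rest is assumed setup, and whether the call blocks until the procedure finishes depends on the client version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch (assumed setup): request a table flush, as the FlushTableProcedure entries record.
public final class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush of every region of the table; recent clients wait for the procedure.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```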
2024-12-06T08:19:46,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:46,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473246701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T08:19:46,772 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:46,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T08:19:46,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:46,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T08:19:46,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:46,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T08:19:46,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:46,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:46,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:46,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:46,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:46,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/9b773bab7a214efa8de6310135a30a52 2024-12-06T08:19:46,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/77c78d355a314b1181a2b1254c065cce as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/77c78d355a314b1181a2b1254c065cce 2024-12-06T08:19:46,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/77c78d355a314b1181a2b1254c065cce, entries=150, sequenceid=357, filesize=12.0 K 2024-12-06T08:19:46,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d115dae5eee242d0adf6c85a08169126 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d115dae5eee242d0adf6c85a08169126 2024-12-06T08:19:46,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d115dae5eee242d0adf6c85a08169126, entries=150, 
sequenceid=357, filesize=12.0 K 2024-12-06T08:19:46,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/9b773bab7a214efa8de6310135a30a52 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/9b773bab7a214efa8de6310135a30a52 2024-12-06T08:19:46,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/9b773bab7a214efa8de6310135a30a52, entries=150, sequenceid=357, filesize=12.0 K 2024-12-06T08:19:46,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 5310d6bf57bb8b709e1aec9222644a3d in 920ms, sequenceid=357, compaction requested=true 2024-12-06T08:19:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:46,999 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:47,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:47,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:47,000 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:47,001 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:47,001 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:47,001 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in 
TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:47,001 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c15d6eb3616b4f5c91076815ddb7ffed, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f8bf69ce03084a799be6b6c0c4644c7b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/77c78d355a314b1181a2b1254c065cce] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.7 K 2024-12-06T08:19:47,001 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:47,001 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c15d6eb3616b4f5c91076815ddb7ffed, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473183115 2024-12-06T08:19:47,002 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:47,002 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:47,002 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/3f79ecdd82d94314b87ec8ad7fcf61ac, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2c42842cee7b4622b214a098005338e3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d115dae5eee242d0adf6c85a08169126] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.7 K 2024-12-06T08:19:47,002 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8bf69ce03084a799be6b6c0c4644c7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733473183734 2024-12-06T08:19:47,002 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f79ecdd82d94314b87ec8ad7fcf61ac, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473183115 2024-12-06T08:19:47,003 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77c78d355a314b1181a2b1254c065cce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733473185763 2024-12-06T08:19:47,003 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c42842cee7b4622b214a098005338e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733473183734 2024-12-06T08:19:47,003 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d115dae5eee242d0adf6c85a08169126, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733473185763 2024-12-06T08:19:47,012 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#273 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:47,012 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/f208d293f61a4dd2a319b51340f3beaa is 50, key is test_row_0/A:col10/1733473185763/Put/seqid=0 2024-12-06T08:19:47,029 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:47,030 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f630f3663b6c4644b31886bd77e2b4ed is 50, key is test_row_0/B:col10/1733473185763/Put/seqid=0 2024-12-06T08:19:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742145_1321 (size=13119) 2024-12-06T08:19:47,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742146_1322 (size=13119) 2024-12-06T08:19:47,074 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/f630f3663b6c4644b31886bd77e2b4ed as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f630f3663b6c4644b31886bd77e2b4ed 2024-12-06T08:19:47,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-06T08:19:47,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:47,080 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-06T08:19:47,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:47,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:47,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:47,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:47,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:47,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:47,085 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into f630f3663b6c4644b31886bd77e2b4ed(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:47,085 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:47,085 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473186999; duration=0sec 2024-12-06T08:19:47,085 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:47,085 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:47,086 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:47,087 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:47,087 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:47,087 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:47,087 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab053350fafb48b883ff61e9315590a5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/55ccef09352f4b0680634f00515e690e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/9b773bab7a214efa8de6310135a30a52] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.7 K 2024-12-06T08:19:47,088 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ab053350fafb48b883ff61e9315590a5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733473183115 2024-12-06T08:19:47,089 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 55ccef09352f4b0680634f00515e690e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733473183734 2024-12-06T08:19:47,089 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b773bab7a214efa8de6310135a30a52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733473185763 2024-12-06T08:19:47,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/c2d502d1b5634017aaba5b9f6a9dd9b1 is 50, key is test_row_0/A:col10/1733473186085/Put/seqid=0 2024-12-06T08:19:47,110 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#276 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:47,110 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/94ef09681f6a401fa16c23370c1e8eda is 50, key is test_row_0/C:col10/1733473185763/Put/seqid=0 2024-12-06T08:19:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742147_1323 (size=9857) 2024-12-06T08:19:47,147 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/c2d502d1b5634017aaba5b9f6a9dd9b1 2024-12-06T08:19:47,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742148_1324 (size=13119) 2024-12-06T08:19:47,181 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/94ef09681f6a401fa16c23370c1e8eda as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/94ef09681f6a401fa16c23370c1e8eda 2024-12-06T08:19:47,187 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into 94ef09681f6a401fa16c23370c1e8eda(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:47,187 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:47,187 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473186999; duration=0sec 2024-12-06T08:19:47,187 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:47,187 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:47,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d420871fce514e30a14be79b6035ef4b is 50, key is test_row_0/B:col10/1733473186085/Put/seqid=0 2024-12-06T08:19:47,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T08:19:47,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:47,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:47,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742149_1325 (size=9857) 2024-12-06T08:19:47,234 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d420871fce514e30a14be79b6035ef4b 2024-12-06T08:19:47,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/8b3bc414a9aa41fe8b196471839fcae5 is 50, key is test_row_0/C:col10/1733473186085/Put/seqid=0 2024-12-06T08:19:47,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742150_1326 (size=9857) 2024-12-06T08:19:47,294 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/8b3bc414a9aa41fe8b196471839fcae5 2024-12-06T08:19:47,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/c2d502d1b5634017aaba5b9f6a9dd9b1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c2d502d1b5634017aaba5b9f6a9dd9b1 2024-12-06T08:19:47,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473247300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,305 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c2d502d1b5634017aaba5b9f6a9dd9b1, entries=100, sequenceid=363, filesize=9.6 K 2024-12-06T08:19:47,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/d420871fce514e30a14be79b6035ef4b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d420871fce514e30a14be79b6035ef4b 2024-12-06T08:19:47,315 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d420871fce514e30a14be79b6035ef4b, entries=100, sequenceid=363, filesize=9.6 K 2024-12-06T08:19:47,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/8b3bc414a9aa41fe8b196471839fcae5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/8b3bc414a9aa41fe8b196471839fcae5 2024-12-06T08:19:47,329 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/8b3bc414a9aa41fe8b196471839fcae5, entries=100, sequenceid=363, filesize=9.6 K 2024-12-06T08:19:47,331 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 5310d6bf57bb8b709e1aec9222644a3d in 251ms, sequenceid=363, 
compaction requested=false 2024-12-06T08:19:47,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:47,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:47,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-06T08:19:47,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-06T08:19:47,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-06T08:19:47,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 713 msec 2024-12-06T08:19:47,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 727 msec 2024-12-06T08:19:47,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:47,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-06T08:19:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:47,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b862e88f7ed842798f570a0f8119ee41 is 50, key is test_row_0/A:col10/1733473187404/Put/seqid=0 2024-12-06T08:19:47,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473247415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742151_1327 (size=14741) 2024-12-06T08:19:47,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b862e88f7ed842798f570a0f8119ee41 2024-12-06T08:19:47,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/e401d7e83e384e39b383896b700fec2e is 50, key is test_row_0/B:col10/1733473187404/Put/seqid=0 2024-12-06T08:19:47,470 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/f208d293f61a4dd2a319b51340f3beaa as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f208d293f61a4dd2a319b51340f3beaa 2024-12-06T08:19:47,477 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into f208d293f61a4dd2a319b51340f3beaa(size=12.8 K), total size for store is 22.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:47,477 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:47,477 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473186999; duration=0sec 2024-12-06T08:19:47,479 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:47,479 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742152_1328 (size=12301) 2024-12-06T08:19:47,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/e401d7e83e384e39b383896b700fec2e 2024-12-06T08:19:47,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473247519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/d3e23c144c1640848bf74b005fab390c is 50, key is test_row_0/C:col10/1733473187404/Put/seqid=0 2024-12-06T08:19:47,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742153_1329 (size=12301) 2024-12-06T08:19:47,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-06T08:19:47,719 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-06T08:19:47,720 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-06T08:19:47,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T08:19:47,722 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:47,723 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:47,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:47,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473247721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T08:19:47,875 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-06T08:19:47,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:47,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:47,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:47,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:47,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:47,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:47,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473247898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,899 DEBUG [Thread-1147 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:47,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473247899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,900 DEBUG [Thread-1145 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:47,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473247904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:47,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473247904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:47,906 DEBUG [Thread-1151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:47,906 DEBUG [Thread-1149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:47,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/d3e23c144c1640848bf74b005fab390c 2024-12-06T08:19:47,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/b862e88f7ed842798f570a0f8119ee41 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b862e88f7ed842798f570a0f8119ee41 2024-12-06T08:19:47,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b862e88f7ed842798f570a0f8119ee41, entries=200, sequenceid=396, filesize=14.4 K 2024-12-06T08:19:47,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/e401d7e83e384e39b383896b700fec2e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/e401d7e83e384e39b383896b700fec2e 2024-12-06T08:19:47,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/e401d7e83e384e39b383896b700fec2e, entries=150, sequenceid=396, filesize=12.0 K 2024-12-06T08:19:47,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/d3e23c144c1640848bf74b005fab390c as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d3e23c144c1640848bf74b005fab390c 2024-12-06T08:19:47,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d3e23c144c1640848bf74b005fab390c, entries=150, sequenceid=396, filesize=12.0 K 2024-12-06T08:19:47,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 5310d6bf57bb8b709e1aec9222644a3d in 589ms, sequenceid=396, compaction requested=true 2024-12-06T08:19:47,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:47,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:47,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:47,996 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:47,996 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:47,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:47,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:47,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:47,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:47,997 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:47,997 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:47,997 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:47,997 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f630f3663b6c4644b31886bd77e2b4ed, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d420871fce514e30a14be79b6035ef4b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/e401d7e83e384e39b383896b700fec2e] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.5 K 2024-12-06T08:19:47,997 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37717 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:47,997 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:47,998 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:47,998 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f208d293f61a4dd2a319b51340f3beaa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c2d502d1b5634017aaba5b9f6a9dd9b1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b862e88f7ed842798f570a0f8119ee41] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.8 K 2024-12-06T08:19:47,998 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting f208d293f61a4dd2a319b51340f3beaa, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733473185763 2024-12-06T08:19:47,998 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f630f3663b6c4644b31886bd77e2b4ed, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733473185763 2024-12-06T08:19:47,999 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d420871fce514e30a14be79b6035ef4b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733473186083 2024-12-06T08:19:47,999 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2d502d1b5634017aaba5b9f6a9dd9b1, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733473186083 2024-12-06T08:19:47,999 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b862e88f7ed842798f570a0f8119ee41, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733473187287 2024-12-06T08:19:47,999 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e401d7e83e384e39b383896b700fec2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733473187287 2024-12-06T08:19:48,009 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:48,010 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3a4880534fec41caabeb8638408b2183 is 50, key is test_row_0/A:col10/1733473187404/Put/seqid=0 2024-12-06T08:19:48,012 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:48,013 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/637ac2c219724e3da7722fdbe63e01e6 is 50, key is test_row_0/B:col10/1733473187404/Put/seqid=0 2024-12-06T08:19:48,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742154_1330 (size=13221) 2024-12-06T08:19:48,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742155_1331 (size=13221) 2024-12-06T08:19:48,021 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3a4880534fec41caabeb8638408b2183 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3a4880534fec41caabeb8638408b2183 2024-12-06T08:19:48,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T08:19:48,028 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-06T08:19:48,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:48,029 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-06T08:19:48,032 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into 3a4880534fec41caabeb8638408b2183(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:48,032 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:48,032 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473187996; duration=0sec 2024-12-06T08:19:48,032 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:48,032 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:48,033 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:48,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:48,034 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:48,034 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:48,034 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting 
compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:48,034 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/94ef09681f6a401fa16c23370c1e8eda, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/8b3bc414a9aa41fe8b196471839fcae5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d3e23c144c1640848bf74b005fab390c] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.5 K 2024-12-06T08:19:48,035 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94ef09681f6a401fa16c23370c1e8eda, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733473185763 2024-12-06T08:19:48,036 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b3bc414a9aa41fe8b196471839fcae5, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733473186083 2024-12-06T08:19:48,036 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3e23c144c1640848bf74b005fab390c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733473187287 2024-12-06T08:19:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/2f11e413bec54352ae93ab50de60a74e is 50, key is test_row_0/A:col10/1733473188025/Put/seqid=0 2024-12-06T08:19:48,046 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:48,047 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/efcf8fa2848a45a9a224663b445a1448 is 50, key is test_row_0/C:col10/1733473187404/Put/seqid=0 2024-12-06T08:19:48,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:48,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:48,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742156_1332 (size=12301) 2024-12-06T08:19:48,057 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/2f11e413bec54352ae93ab50de60a74e 2024-12-06T08:19:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742157_1333 (size=13221) 2024-12-06T08:19:48,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/47cbc05b10904ec8b465053d6fc9b253 is 50, key is test_row_0/B:col10/1733473188025/Put/seqid=0 2024-12-06T08:19:48,075 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/efcf8fa2848a45a9a224663b445a1448 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/efcf8fa2848a45a9a224663b445a1448 2024-12-06T08:19:48,081 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into efcf8fa2848a45a9a224663b445a1448(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:48,082 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:48,082 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473187996; duration=0sec 2024-12-06T08:19:48,082 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:48,082 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:48,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742158_1334 (size=12301) 2024-12-06T08:19:48,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:48,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473248137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:48,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473248240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T08:19:48,442 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/637ac2c219724e3da7722fdbe63e01e6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/637ac2c219724e3da7722fdbe63e01e6 2024-12-06T08:19:48,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:48,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473248443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,447 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 637ac2c219724e3da7722fdbe63e01e6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:48,447 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:48,447 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473187996; duration=0sec 2024-12-06T08:19:48,448 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:48,448 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:48,485 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/47cbc05b10904ec8b465053d6fc9b253 2024-12-06T08:19:48,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/0776d4ebe93a4459b48046ccade8dcd2 is 50, key is test_row_0/C:col10/1733473188025/Put/seqid=0 2024-12-06T08:19:48,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742159_1335 (size=12301) 2024-12-06T08:19:48,551 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=405 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/0776d4ebe93a4459b48046ccade8dcd2 2024-12-06T08:19:48,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/2f11e413bec54352ae93ab50de60a74e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/2f11e413bec54352ae93ab50de60a74e 2024-12-06T08:19:48,566 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/2f11e413bec54352ae93ab50de60a74e, entries=150, sequenceid=405, filesize=12.0 K 2024-12-06T08:19:48,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.StoreScanner(992): StoreScanner already has the close lock. There is no need to updateReaders 2024-12-06T08:19:48,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/47cbc05b10904ec8b465053d6fc9b253 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/47cbc05b10904ec8b465053d6fc9b253 2024-12-06T08:19:48,573 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/47cbc05b10904ec8b465053d6fc9b253, entries=150, sequenceid=405, filesize=12.0 K 2024-12-06T08:19:48,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/0776d4ebe93a4459b48046ccade8dcd2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/0776d4ebe93a4459b48046ccade8dcd2 2024-12-06T08:19:48,580 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/0776d4ebe93a4459b48046ccade8dcd2, entries=150, sequenceid=405, filesize=12.0 K 2024-12-06T08:19:48,582 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 5310d6bf57bb8b709e1aec9222644a3d in 552ms, sequenceid=405, 
compaction requested=false 2024-12-06T08:19:48,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:48,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:48,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-06T08:19:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-06T08:19:48,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-06T08:19:48,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 860 msec 2024-12-06T08:19:48,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 865 msec 2024-12-06T08:19:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:48,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-06T08:19:48,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:48,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:48,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:48,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:48,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:48,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:48,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3495cae8726e4b2bb11ea56986ce8417 is 50, key is test_row_0/A:col10/1733473188136/Put/seqid=0 2024-12-06T08:19:48,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473248754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742160_1336 (size=14741) 2024-12-06T08:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-06T08:19:48,827 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-06T08:19:48,828 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:48,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-06T08:19:48,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T08:19:48,829 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:48,830 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:48,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:48,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:48,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473248856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T08:19:48,982 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:48,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:48,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:48,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:48,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:48,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:48,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:48,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:49,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473249059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T08:19:49,135 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:49,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:49,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:49,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3495cae8726e4b2bb11ea56986ce8417 2024-12-06T08:19:49,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4e8d909105da4cc4855ae1670affb942 is 50, key is test_row_0/B:col10/1733473188136/Put/seqid=0 2024-12-06T08:19:49,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742161_1337 (size=12301) 2024-12-06T08:19:49,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:49,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:49,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:49,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:49,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473249362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T08:19:49,441 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:49,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:49,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:49,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4e8d909105da4cc4855ae1670affb942 2024-12-06T08:19:49,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c4e4a7e8df514cac9f4ffc27f95d6f7a is 50, key is test_row_0/C:col10/1733473188136/Put/seqid=0 2024-12-06T08:19:49,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742162_1338 (size=12301) 2024-12-06T08:19:49,594 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:49,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:49,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,747 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:49,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:49,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,748 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:49,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473249865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,901 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:49,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:49,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:49,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:49,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:49,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T08:19:49,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c4e4a7e8df514cac9f4ffc27f95d6f7a 2024-12-06T08:19:49,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/3495cae8726e4b2bb11ea56986ce8417 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3495cae8726e4b2bb11ea56986ce8417 2024-12-06T08:19:50,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3495cae8726e4b2bb11ea56986ce8417, entries=200, sequenceid=437, filesize=14.4 K 2024-12-06T08:19:50,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4e8d909105da4cc4855ae1670affb942 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4e8d909105da4cc4855ae1670affb942 2024-12-06T08:19:50,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4e8d909105da4cc4855ae1670affb942, entries=150, sequenceid=437, filesize=12.0 K 2024-12-06T08:19:50,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c4e4a7e8df514cac9f4ffc27f95d6f7a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c4e4a7e8df514cac9f4ffc27f95d6f7a 2024-12-06T08:19:50,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c4e4a7e8df514cac9f4ffc27f95d6f7a, entries=150, sequenceid=437, filesize=12.0 K 2024-12-06T08:19:50,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 5310d6bf57bb8b709e1aec9222644a3d in 1265ms, sequenceid=437, compaction requested=true 2024-12-06T08:19:50,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:50,012 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:50,013 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:50,014 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:50,014 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:50,014 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:50,014 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3a4880534fec41caabeb8638408b2183, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/2f11e413bec54352ae93ab50de60a74e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3495cae8726e4b2bb11ea56986ce8417] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=39.3 K 2024-12-06T08:19:50,014 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a4880534fec41caabeb8638408b2183, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733473187287 2024-12-06T08:19:50,015 DEBUG 
[RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f11e413bec54352ae93ab50de60a74e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733473187409 2024-12-06T08:19:50,015 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:50,015 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:50,015 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:50,015 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/637ac2c219724e3da7722fdbe63e01e6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/47cbc05b10904ec8b465053d6fc9b253, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4e8d909105da4cc4855ae1670affb942] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.9 K 2024-12-06T08:19:50,015 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3495cae8726e4b2bb11ea56986ce8417, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733473188126 2024-12-06T08:19:50,016 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 637ac2c219724e3da7722fdbe63e01e6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733473187287 2024-12-06T08:19:50,016 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 47cbc05b10904ec8b465053d6fc9b253, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733473187409 2024-12-06T08:19:50,017 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e8d909105da4cc4855ae1670affb942, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733473188126 2024-12-06T08:19:50,025 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:50,025 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:50,026 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/469eb0b117c841e6b063d62fb62a5593 is 50, key is test_row_0/B:col10/1733473188136/Put/seqid=0 2024-12-06T08:19:50,026 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/6c27a5eb339d4054a735c9e308c9fc8c is 50, key is test_row_0/A:col10/1733473188136/Put/seqid=0 2024-12-06T08:19:50,053 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:50,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-06T08:19:50,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:50,054 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-06T08:19:50,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:50,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:50,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:50,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:50,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:50,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742163_1339 (size=13323) 2024-12-06T08:19:50,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742164_1340 (size=13323) 2024-12-06T08:19:50,088 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/469eb0b117c841e6b063d62fb62a5593 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/469eb0b117c841e6b063d62fb62a5593 2024-12-06T08:19:50,093 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 469eb0b117c841e6b063d62fb62a5593(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:50,093 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:50,093 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473190014; duration=0sec 2024-12-06T08:19:50,093 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:50,093 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:50,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/4ac4748b10524d90ba901a467d94aced is 50, key is test_row_0/A:col10/1733473188751/Put/seqid=0 2024-12-06T08:19:50,094 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:50,095 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:50,096 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:50,096 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:50,096 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/efcf8fa2848a45a9a224663b445a1448, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/0776d4ebe93a4459b48046ccade8dcd2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c4e4a7e8df514cac9f4ffc27f95d6f7a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=36.9 K 2024-12-06T08:19:50,096 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting efcf8fa2848a45a9a224663b445a1448, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733473187287 2024-12-06T08:19:50,097 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 0776d4ebe93a4459b48046ccade8dcd2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733473187409 2024-12-06T08:19:50,097 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c4e4a7e8df514cac9f4ffc27f95d6f7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733473188126 2024-12-06T08:19:50,118 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:50,119 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/56a1bda064424c8f9736a053c4b471ff is 50, key is test_row_0/C:col10/1733473188136/Put/seqid=0 2024-12-06T08:19:50,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742165_1341 (size=9857) 2024-12-06T08:19:50,123 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/4ac4748b10524d90ba901a467d94aced 2024-12-06T08:19:50,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1fcdb9b8e18347319e3cf748c84b4e2c is 50, key is test_row_0/B:col10/1733473188751/Put/seqid=0 2024-12-06T08:19:50,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742166_1342 (size=13323) 2024-12-06T08:19:50,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742167_1343 (size=9857) 2024-12-06T08:19:50,162 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1fcdb9b8e18347319e3cf748c84b4e2c 2024-12-06T08:19:50,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c7e887b5264040bca1a3fc83b071b193 is 50, key is test_row_0/C:col10/1733473188751/Put/seqid=0 2024-12-06T08:19:50,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742168_1344 (size=9857) 2024-12-06T08:19:50,475 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/6c27a5eb339d4054a735c9e308c9fc8c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/6c27a5eb339d4054a735c9e308c9fc8c 2024-12-06T08:19:50,481 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into 
6c27a5eb339d4054a735c9e308c9fc8c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:50,481 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:50,481 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473190012; duration=0sec 2024-12-06T08:19:50,481 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:50,481 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:50,542 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/56a1bda064424c8f9736a053c4b471ff as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/56a1bda064424c8f9736a053c4b471ff 2024-12-06T08:19:50,547 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into 56a1bda064424c8f9736a053c4b471ff(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:50,547 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:50,547 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473190014; duration=0sec 2024-12-06T08:19:50,547 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:50,547 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:50,580 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c7e887b5264040bca1a3fc83b071b193 2024-12-06T08:19:50,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/4ac4748b10524d90ba901a467d94aced as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4ac4748b10524d90ba901a467d94aced 2024-12-06T08:19:50,590 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4ac4748b10524d90ba901a467d94aced, entries=100, sequenceid=443, filesize=9.6 K 2024-12-06T08:19:50,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/1fcdb9b8e18347319e3cf748c84b4e2c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1fcdb9b8e18347319e3cf748c84b4e2c 2024-12-06T08:19:50,596 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1fcdb9b8e18347319e3cf748c84b4e2c, entries=100, sequenceid=443, filesize=9.6 K 2024-12-06T08:19:50,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/c7e887b5264040bca1a3fc83b071b193 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c7e887b5264040bca1a3fc83b071b193 2024-12-06T08:19:50,601 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c7e887b5264040bca1a3fc83b071b193, entries=100, sequenceid=443, filesize=9.6 K 2024-12-06T08:19:50,602 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 5310d6bf57bb8b709e1aec9222644a3d in 547ms, sequenceid=443, compaction requested=false 2024-12-06T08:19:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-06T08:19:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-06T08:19:50,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-06T08:19:50,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7730 sec 2024-12-06T08:19:50,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.7770 sec 2024-12-06T08:19:50,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:50,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:19:50,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:50,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:50,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:50,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:50,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:50,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:50,908 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9e2f3bc83a094c52a5cc73ad5f23ab1e is 50, key is test_row_0/A:col10/1733473190901/Put/seqid=0 2024-12-06T08:19:50,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742169_1345 (size=14741) 2024-12-06T08:19:50,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-06T08:19:50,934 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-06T08:19:50,935 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:19:50,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-06T08:19:50,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:50,936 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:19:50,937 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:19:50,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:19:50,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:50,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473250965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:51,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473251067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:51,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:51,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:51,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:51,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473251270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9e2f3bc83a094c52a5cc73ad5f23ab1e 2024-12-06T08:19:51,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2570f0b22e15453cac97d9906f808f88 is 50, key is test_row_0/B:col10/1733473190901/Put/seqid=0 2024-12-06T08:19:51,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742170_1346 (size=12301) 2024-12-06T08:19:51,394 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:51,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:51,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:51,547 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:51,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:51,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473251573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,700 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2570f0b22e15453cac97d9906f808f88 2024-12-06T08:19:51,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ed96adab5ef6403a9087b84c9856abd1 is 50, key is test_row_0/C:col10/1733473190901/Put/seqid=0 2024-12-06T08:19:51,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742171_1347 (size=12301) 2024-12-06T08:19:51,856 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:51,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:51,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:51,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:51,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:51,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45512 deadline: 1733473251901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,903 DEBUG [Thread-1147 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:51,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45550 deadline: 1733473251916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,917 DEBUG [Thread-1151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at 
org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:51,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45558 deadline: 1733473251916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,918 DEBUG [Thread-1145 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, 
server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:51,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:51,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45538 deadline: 1733473251923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:51,924 DEBUG [Thread-1149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:19:52,009 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:52,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:52,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
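
The repeated RegionTooBusyException retries above come from the blocking client path visible in the traces (HTable.put driven by RpcRetryingCallerImpl, "tries=7, retries=16"). The following is a minimal illustrative sketch of that client-side pattern, not code taken from the test: the table name, row, family "A" and qualifier "col10" match the log, while the cell value, the pause setting and the error handling are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client retry knobs; the log above shows attempts counted against retries=16.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base backoff in ms (illustrative value)

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Family "A" and qualifier "col10" appear in the log; the value is made up.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          try {
            // Retried internally; each failed attempt is the RegionTooBusyException
            // logged by RpcRetryingCallerImpl above.
            table.put(put);
          } catch (IOException e) {
            // Surfaces to the caller once the retries are exhausted.
            System.err.println("Put failed after retries: " + e);
          }
        }
      }
    }
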
2024-12-06T08:19:52,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:52,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:52,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:52,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:19:52,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:52,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:52,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:19:52,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45490 deadline: 1733473252077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:19:52,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:52,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:52,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:52,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. as already flushing 2024-12-06T08:19:52,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:52,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
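
The "Over memstore limit=512.0 K" warnings are raised by HRegion.checkResources when a region's memstore exceeds its blocking size, which is the per-region flush threshold multiplied by the blocking multiplier. A hedged sketch of the two server-side settings involved; the values below are illustrative defaults, not the ones this test run uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold in bytes; a flush is requested above this size.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier while flushes are still catching up.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blockingLimit + " bytes");
      }
    }
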
2024-12-06T08:19:52,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:52,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:19:52,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ed96adab5ef6403a9087b84c9856abd1 2024-12-06T08:19:52,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9e2f3bc83a094c52a5cc73ad5f23ab1e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9e2f3bc83a094c52a5cc73ad5f23ab1e 2024-12-06T08:19:52,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9e2f3bc83a094c52a5cc73ad5f23ab1e, entries=200, sequenceid=457, filesize=14.4 K 2024-12-06T08:19:52,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/2570f0b22e15453cac97d9906f808f88 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2570f0b22e15453cac97d9906f808f88 2024-12-06T08:19:52,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2570f0b22e15453cac97d9906f808f88, entries=150, sequenceid=457, filesize=12.0 K 2024-12-06T08:19:52,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/ed96adab5ef6403a9087b84c9856abd1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ed96adab5ef6403a9087b84c9856abd1 2024-12-06T08:19:52,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ed96adab5ef6403a9087b84c9856abd1, entries=150, sequenceid=457, filesize=12.0 K 2024-12-06T08:19:52,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5310d6bf57bb8b709e1aec9222644a3d in 1287ms, sequenceid=457, compaction requested=true 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
5310d6bf57bb8b709e1aec9222644a3d:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:52,191 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5310d6bf57bb8b709e1aec9222644a3d:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:19:52,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:52,191 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:52,201 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:52,201 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37921 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:52,201 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/B is initiating minor compaction (all files) 2024-12-06T08:19:52,201 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/A is initiating minor compaction (all files) 2024-12-06T08:19:52,201 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/B in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:52,201 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/A in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
2024-12-06T08:19:52,201 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/469eb0b117c841e6b063d62fb62a5593, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1fcdb9b8e18347319e3cf748c84b4e2c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2570f0b22e15453cac97d9906f808f88] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.6 K 2024-12-06T08:19:52,201 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/6c27a5eb339d4054a735c9e308c9fc8c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4ac4748b10524d90ba901a467d94aced, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9e2f3bc83a094c52a5cc73ad5f23ab1e] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=37.0 K 2024-12-06T08:19:52,201 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c27a5eb339d4054a735c9e308c9fc8c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733473188126 2024-12-06T08:19:52,202 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ac4748b10524d90ba901a467d94aced, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733473188751 2024-12-06T08:19:52,202 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 469eb0b117c841e6b063d62fb62a5593, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733473188126 2024-12-06T08:19:52,202 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e2f3bc83a094c52a5cc73ad5f23ab1e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733473190878 2024-12-06T08:19:52,202 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fcdb9b8e18347319e3cf748c84b4e2c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733473188751 2024-12-06T08:19:52,202 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2570f0b22e15453cac97d9906f808f88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733473190900 2024-12-06T08:19:52,214 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#B#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:52,214 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#A#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:52,214 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/089369e9aeae49eca5cff2a30011c1ad is 50, key is test_row_0/B:col10/1733473190901/Put/seqid=0 2024-12-06T08:19:52,215 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/fee4b9729f9c409d98bc263b85ad4981 is 50, key is test_row_0/A:col10/1733473190901/Put/seqid=0 2024-12-06T08:19:52,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742173_1349 (size=13425) 2024-12-06T08:19:52,247 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/fee4b9729f9c409d98bc263b85ad4981 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/fee4b9729f9c409d98bc263b85ad4981 2024-12-06T08:19:52,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742172_1348 (size=13425) 2024-12-06T08:19:52,253 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/A of 5310d6bf57bb8b709e1aec9222644a3d into fee4b9729f9c409d98bc263b85ad4981(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:19:52,253 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:52,253 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/A, priority=13, startTime=1733473192191; duration=0sec 2024-12-06T08:19:52,253 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:19:52,253 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:A 2024-12-06T08:19:52,253 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:19:52,255 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:19:52,255 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 5310d6bf57bb8b709e1aec9222644a3d/C is initiating minor compaction (all files) 2024-12-06T08:19:52,255 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5310d6bf57bb8b709e1aec9222644a3d/C in TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:52,255 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/56a1bda064424c8f9736a053c4b471ff, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c7e887b5264040bca1a3fc83b071b193, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ed96adab5ef6403a9087b84c9856abd1] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp, totalSize=34.6 K 2024-12-06T08:19:52,256 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56a1bda064424c8f9736a053c4b471ff, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733473188126 2024-12-06T08:19:52,256 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/089369e9aeae49eca5cff2a30011c1ad as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/089369e9aeae49eca5cff2a30011c1ad 2024-12-06T08:19:52,256 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7e887b5264040bca1a3fc83b071b193, 
keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1733473188751 2024-12-06T08:19:52,257 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed96adab5ef6403a9087b84c9856abd1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733473190900 2024-12-06T08:19:52,262 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/B of 5310d6bf57bb8b709e1aec9222644a3d into 089369e9aeae49eca5cff2a30011c1ad(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:19:52,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:52,262 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/B, priority=13, startTime=1733473192191; duration=0sec 2024-12-06T08:19:52,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:52,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:B 2024-12-06T08:19:52,282 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5310d6bf57bb8b709e1aec9222644a3d#C#compaction#302 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:19:52,283 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/672ec47dc5004a8bac4e54b792984554 is 50, key is test_row_0/C:col10/1733473190901/Put/seqid=0 2024-12-06T08:19:52,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742174_1350 (size=13425) 2024-12-06T08:19:52,293 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/672ec47dc5004a8bac4e54b792984554 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/672ec47dc5004a8bac4e54b792984554 2024-12-06T08:19:52,299 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5310d6bf57bb8b709e1aec9222644a3d/C of 5310d6bf57bb8b709e1aec9222644a3d into 672ec47dc5004a8bac4e54b792984554(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
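
The three minor compactions above were selected automatically by the ExploringCompactionPolicy once each store reached three eligible files after the flush. For comparison, a compaction can also be requested explicitly through the Admin API; a minimal sketch under the assumption of a default client configuration, with the table and family names taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction for a single column family ("A" as in the log).
          admin.compact(table, Bytes.toBytes("A"));
          // Or request a major compaction across all families of the table.
          admin.majorCompact(table);
        }
      }
    }
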
2024-12-06T08:19:52,299 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:52,299 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d., storeName=5310d6bf57bb8b709e1aec9222644a3d/C, priority=13, startTime=1733473192191; duration=0sec 2024-12-06T08:19:52,300 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:19:52,300 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5310d6bf57bb8b709e1aec9222644a3d:C 2024-12-06T08:19:52,315 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:19:52,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:52,316 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:19:52,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:19:52,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/00623dfc5e814f6fbb8f65c9be4ceb0b is 50, key is test_row_0/A:col10/1733473190956/Put/seqid=0 2024-12-06T08:19:52,324 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742175_1351 (size=12301) 2024-12-06T08:19:52,725 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/00623dfc5e814f6fbb8f65c9be4ceb0b 2024-12-06T08:19:52,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/02f9ac82435c4fcc957c2fb70acb294f is 50, key is test_row_0/B:col10/1733473190956/Put/seqid=0 2024-12-06T08:19:52,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742176_1352 (size=12301) 2024-12-06T08:19:52,837 DEBUG [Thread-1160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:65195 2024-12-06T08:19:52,837 DEBUG [Thread-1162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:65195 2024-12-06T08:19:52,837 DEBUG [Thread-1160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:52,837 DEBUG [Thread-1162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:52,838 DEBUG [Thread-1158 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:65195 2024-12-06T08:19:52,838 DEBUG [Thread-1158 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:52,840 DEBUG [Thread-1156 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:65195 2024-12-06T08:19:52,840 DEBUG [Thread-1156 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:52,841 DEBUG [Thread-1154 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:65195 2024-12-06T08:19:52,841 DEBUG [Thread-1154 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:53,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:53,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:19:53,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
as already flushing 2024-12-06T08:19:53,087 DEBUG [Thread-1143 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58341641 to 127.0.0.1:65195 2024-12-06T08:19:53,087 DEBUG [Thread-1143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:19:53,139 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/02f9ac82435c4fcc957c2fb70acb294f 2024-12-06T08:19:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/fa9628195aba4d7bb0c1c9cfd3d19803 is 50, key is test_row_0/C:col10/1733473190956/Put/seqid=0 2024-12-06T08:19:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742177_1353 (size=12301) 2024-12-06T08:19:53,550 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/fa9628195aba4d7bb0c1c9cfd3d19803 2024-12-06T08:19:53,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/00623dfc5e814f6fbb8f65c9be4ceb0b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/00623dfc5e814f6fbb8f65c9be4ceb0b 2024-12-06T08:19:53,558 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/00623dfc5e814f6fbb8f65c9be4ceb0b, entries=150, sequenceid=485, filesize=12.0 K 2024-12-06T08:19:53,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/02f9ac82435c4fcc957c2fb70acb294f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/02f9ac82435c4fcc957c2fb70acb294f 2024-12-06T08:19:53,562 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/02f9ac82435c4fcc957c2fb70acb294f, entries=150, 
sequenceid=485, filesize=12.0 K 2024-12-06T08:19:53,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/fa9628195aba4d7bb0c1c9cfd3d19803 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fa9628195aba4d7bb0c1c9cfd3d19803 2024-12-06T08:19:53,566 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fa9628195aba4d7bb0c1c9cfd3d19803, entries=150, sequenceid=485, filesize=12.0 K 2024-12-06T08:19:53,567 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for 5310d6bf57bb8b709e1aec9222644a3d in 1250ms, sequenceid=485, compaction requested=false 2024-12-06T08:19:53,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:19:53,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:19:53,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-06T08:19:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-06T08:19:53,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-06T08:19:53,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6310 sec 2024-12-06T08:19:53,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 2.6340 sec 2024-12-06T08:19:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-06T08:19:55,041 INFO [Thread-1153 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-06T08:19:59,738 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
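
Procedures 91 and 92 above are the master-side FlushTableProcedure and its per-region FlushRegionProcedure, driven by an admin flush request; the client polls "Checking to see if procedure is done pid=91" until the operation completes. A minimal sketch of issuing such a flush; the table name is from the log, everything else is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Returns once the master-side flush procedure for every region of the
          // table has finished (procId 91 in the log above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
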
2024-12-06T08:20:01,943 DEBUG [Thread-1149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:65195 2024-12-06T08:20:01,943 DEBUG [Thread-1149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:01,971 DEBUG [Thread-1151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:65195 2024-12-06T08:20:01,971 DEBUG [Thread-1151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:01,987 DEBUG [Thread-1147 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:65195 2024-12-06T08:20:01,987 DEBUG [Thread-1147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:01,998 DEBUG [Thread-1145 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:65195 2024-12-06T08:20:01,998 DEBUG [Thread-1145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 205 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7056 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7083 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6633 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7116 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7069 2024-12-06T08:20:01,998 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T08:20:01,998 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:20:01,998 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b70f48f to 127.0.0.1:65195 2024-12-06T08:20:01,998 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:01,999 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T08:20:01,999 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T08:20:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T08:20:02,001 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473202001"}]},"ts":"1733473202001"} 2024-12-06T08:20:02,002 INFO [PEWorker-1 {}] 
hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T08:20:02,004 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T08:20:02,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:20:02,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, UNASSIGN}] 2024-12-06T08:20:02,006 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, UNASSIGN 2024-12-06T08:20:02,007 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=5310d6bf57bb8b709e1aec9222644a3d, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:02,007 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:20:02,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:20:02,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T08:20:02,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:02,159 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:20:02,159 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:20:02,159 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 5310d6bf57bb8b709e1aec9222644a3d, disabling compactions & flushes 2024-12-06T08:20:02,159 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:20:02,159 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 2024-12-06T08:20:02,159 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. after waiting 0 ms 2024-12-06T08:20:02,159 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
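The DisableTableProcedure stored as pid=93 above, together with its CloseTableRegionsProcedure (pid=94), TransitRegionStateProcedure UNASSIGN (pid=95) and CloseRegionProcedure (pid=96) subprocedures, is what the master runs when a client disables the table; the repeated "Checking to see if procedure is done pid=93" lines are the client polling for completion. A minimal sketch of that client-side call, again assuming the standard HBase 2.x Admin API rather than the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Synchronous: returns only after the master has finished the
        // DisableTableProcedure chain, i.e. once every region has been
        // closed (final memstore flush, then store-file archiving, as in
        // the RS_CLOSE_REGION and HFileArchiver entries that follow).
        admin.disableTable(table);
      }
    }
  }
}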
2024-12-06T08:20:02,159 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 5310d6bf57bb8b709e1aec9222644a3d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T08:20:02,160 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=A 2024-12-06T08:20:02,160 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:02,160 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=B 2024-12-06T08:20:02,160 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:02,160 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5310d6bf57bb8b709e1aec9222644a3d, store=C 2024-12-06T08:20:02,160 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:02,163 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9a7e3c1ee88549e08c94ce5179bc6db5 is 50, key is test_row_0/A:col10/1733473201997/Put/seqid=0 2024-12-06T08:20:02,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742178_1354 (size=12301) 2024-12-06T08:20:02,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T08:20:02,568 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9a7e3c1ee88549e08c94ce5179bc6db5 2024-12-06T08:20:02,574 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4ed43642490c4286985e1d17ea053b6d is 50, key is test_row_0/B:col10/1733473201997/Put/seqid=0 2024-12-06T08:20:02,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742179_1355 (size=12301) 2024-12-06T08:20:02,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T08:20:02,980 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4ed43642490c4286985e1d17ea053b6d 2024-12-06T08:20:02,986 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/da6442f703984deb9f9a3c4c212811be is 50, key is test_row_0/C:col10/1733473201997/Put/seqid=0 2024-12-06T08:20:02,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742180_1356 (size=12301) 2024-12-06T08:20:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T08:20:03,390 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/da6442f703984deb9f9a3c4c212811be 2024-12-06T08:20:03,395 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/A/9a7e3c1ee88549e08c94ce5179bc6db5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9a7e3c1ee88549e08c94ce5179bc6db5 2024-12-06T08:20:03,398 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9a7e3c1ee88549e08c94ce5179bc6db5, entries=150, sequenceid=493, filesize=12.0 K 2024-12-06T08:20:03,399 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/B/4ed43642490c4286985e1d17ea053b6d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4ed43642490c4286985e1d17ea053b6d 2024-12-06T08:20:03,402 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4ed43642490c4286985e1d17ea053b6d, entries=150, sequenceid=493, filesize=12.0 K 2024-12-06T08:20:03,402 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/.tmp/C/da6442f703984deb9f9a3c4c212811be as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/da6442f703984deb9f9a3c4c212811be 2024-12-06T08:20:03,405 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/da6442f703984deb9f9a3c4c212811be, entries=150, sequenceid=493, filesize=12.0 K 2024-12-06T08:20:03,406 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 5310d6bf57bb8b709e1aec9222644a3d in 1247ms, sequenceid=493, compaction requested=true 2024-12-06T08:20:03,406 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/04889eb3d522417f86f8670cddc8a602, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/74d390e8034f4c628b2a9fb98ef3a01d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b0dba054e79842dbb9534254850f0f47, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/911723aeb2074f69a4727570697bd869, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b6e712398049452eb0659e0441db9896, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b5991595fe4f4aa6b9c9b943674e33b6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/def887f0d7424931a5d6ebe4fb0ed61e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d38b36100093481c971b971015756f07, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4cff005d68b541ecbd1941222dcd720c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/25839990540c453592bc9ac4942f229d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3855689007464a1eb0712f9a700fbd4d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/cf22fbd8618c401ab979bd7955195d61, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/8f581c53111e4d80b3d1e3adc8194966, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/175181d415f74aeea999c3c803a95b2a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9b7e06efef45480eb898b5ce11d589e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d43fbb81c6db448a8ba6e5e6fad64b88, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/5d78125c9f7a46d59eb9048a2ee5ddda, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/7c1a0a0179c341f6945ec87d722bc12e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/36a30f4d00b64ca7a645f47d332ca028, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/52ec04e1f77242b29e63c6972f91cafd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c15d6eb3616b4f5c91076815ddb7ffed, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/1ae0dd22d45d42588194b29c14fdb74c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f8bf69ce03084a799be6b6c0c4644c7b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f208d293f61a4dd2a319b51340f3beaa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/77c78d355a314b1181a2b1254c065cce, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c2d502d1b5634017aaba5b9f6a9dd9b1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b862e88f7ed842798f570a0f8119ee41, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3a4880534fec41caabeb8638408b2183, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/2f11e413bec54352ae93ab50de60a74e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3495cae8726e4b2bb11ea56986ce8417, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/6c27a5eb339d4054a735c9e308c9fc8c, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4ac4748b10524d90ba901a467d94aced, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9e2f3bc83a094c52a5cc73ad5f23ab1e] to archive 2024-12-06T08:20:03,407 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:20:03,409 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/04889eb3d522417f86f8670cddc8a602 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/04889eb3d522417f86f8670cddc8a602 2024-12-06T08:20:03,410 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/74d390e8034f4c628b2a9fb98ef3a01d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/74d390e8034f4c628b2a9fb98ef3a01d 2024-12-06T08:20:03,411 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b0dba054e79842dbb9534254850f0f47 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b0dba054e79842dbb9534254850f0f47 2024-12-06T08:20:03,412 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/911723aeb2074f69a4727570697bd869 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/911723aeb2074f69a4727570697bd869 2024-12-06T08:20:03,413 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b6e712398049452eb0659e0441db9896 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b6e712398049452eb0659e0441db9896 2024-12-06T08:20:03,413 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b5991595fe4f4aa6b9c9b943674e33b6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b5991595fe4f4aa6b9c9b943674e33b6 2024-12-06T08:20:03,414 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/def887f0d7424931a5d6ebe4fb0ed61e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/def887f0d7424931a5d6ebe4fb0ed61e 2024-12-06T08:20:03,415 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d38b36100093481c971b971015756f07 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d38b36100093481c971b971015756f07 2024-12-06T08:20:03,416 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4cff005d68b541ecbd1941222dcd720c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4cff005d68b541ecbd1941222dcd720c 2024-12-06T08:20:03,417 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/25839990540c453592bc9ac4942f229d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/25839990540c453592bc9ac4942f229d 2024-12-06T08:20:03,418 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3855689007464a1eb0712f9a700fbd4d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3855689007464a1eb0712f9a700fbd4d 2024-12-06T08:20:03,419 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/cf22fbd8618c401ab979bd7955195d61 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/cf22fbd8618c401ab979bd7955195d61 2024-12-06T08:20:03,420 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/8f581c53111e4d80b3d1e3adc8194966 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/8f581c53111e4d80b3d1e3adc8194966 2024-12-06T08:20:03,421 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/175181d415f74aeea999c3c803a95b2a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/175181d415f74aeea999c3c803a95b2a 2024-12-06T08:20:03,422 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9b7e06efef45480eb898b5ce11d589e0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9b7e06efef45480eb898b5ce11d589e0 2024-12-06T08:20:03,423 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d43fbb81c6db448a8ba6e5e6fad64b88 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/d43fbb81c6db448a8ba6e5e6fad64b88 2024-12-06T08:20:03,424 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/5d78125c9f7a46d59eb9048a2ee5ddda to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/5d78125c9f7a46d59eb9048a2ee5ddda 2024-12-06T08:20:03,425 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/7c1a0a0179c341f6945ec87d722bc12e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/7c1a0a0179c341f6945ec87d722bc12e 2024-12-06T08:20:03,426 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/36a30f4d00b64ca7a645f47d332ca028 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/36a30f4d00b64ca7a645f47d332ca028 2024-12-06T08:20:03,427 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/52ec04e1f77242b29e63c6972f91cafd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/52ec04e1f77242b29e63c6972f91cafd 2024-12-06T08:20:03,428 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c15d6eb3616b4f5c91076815ddb7ffed to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c15d6eb3616b4f5c91076815ddb7ffed 2024-12-06T08:20:03,429 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/1ae0dd22d45d42588194b29c14fdb74c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/1ae0dd22d45d42588194b29c14fdb74c 2024-12-06T08:20:03,429 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f8bf69ce03084a799be6b6c0c4644c7b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f8bf69ce03084a799be6b6c0c4644c7b 2024-12-06T08:20:03,430 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f208d293f61a4dd2a319b51340f3beaa to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/f208d293f61a4dd2a319b51340f3beaa 2024-12-06T08:20:03,431 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/77c78d355a314b1181a2b1254c065cce to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/77c78d355a314b1181a2b1254c065cce 2024-12-06T08:20:03,432 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c2d502d1b5634017aaba5b9f6a9dd9b1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/c2d502d1b5634017aaba5b9f6a9dd9b1 2024-12-06T08:20:03,433 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b862e88f7ed842798f570a0f8119ee41 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/b862e88f7ed842798f570a0f8119ee41 2024-12-06T08:20:03,434 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3a4880534fec41caabeb8638408b2183 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3a4880534fec41caabeb8638408b2183 2024-12-06T08:20:03,435 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/2f11e413bec54352ae93ab50de60a74e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/2f11e413bec54352ae93ab50de60a74e 2024-12-06T08:20:03,436 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3495cae8726e4b2bb11ea56986ce8417 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/3495cae8726e4b2bb11ea56986ce8417 2024-12-06T08:20:03,437 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/6c27a5eb339d4054a735c9e308c9fc8c to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/6c27a5eb339d4054a735c9e308c9fc8c 2024-12-06T08:20:03,438 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4ac4748b10524d90ba901a467d94aced to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/4ac4748b10524d90ba901a467d94aced 2024-12-06T08:20:03,440 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9e2f3bc83a094c52a5cc73ad5f23ab1e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9e2f3bc83a094c52a5cc73ad5f23ab1e 2024-12-06T08:20:03,441 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1f566ef2322a4c0ab9b95ed5d8995842, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/772f33b1b9264136a2d54c136307cf99, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5ab722c5653e42e0aa4d347c604a01eb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/6e8836bcdfd8426cb8d0bc8a4e07ec88, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f6decd799720485ea36e98540a839648, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bccc054396b4cef978bb8faf3997662, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bd8614236e14fa0a92486217567a1dc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1e37ad58156f430bb2411e290dbc46b0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/34fe811edc044ca4adb6eabdeb1a7859, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5cf712284811486e89e1f411adf94ad2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/742901966e414088922dda8a4aaa0b04, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/7e9e38d6758545b699e9f7bf13ad58c9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f0816ac82cfe4ed7b0e485782c56843c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/dc6a495da7da4117a16e7a8db0938a54, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/92c43084adca448c982bf2b45f96e9a1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ee725fa5048247f3a0ff16b90436f678, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d3d5534abf5b4d5bb68155eef1d1461a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/cedb11d11ff044fd857b3e2401f8b003, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/9aaaebbec4e6444f89a9a7f82a37261f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/3f79ecdd82d94314b87ec8ad7fcf61ac, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ff458ff9e3f94733955b7839c2d7547f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2c42842cee7b4622b214a098005338e3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f630f3663b6c4644b31886bd77e2b4ed, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d115dae5eee242d0adf6c85a08169126, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d420871fce514e30a14be79b6035ef4b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/637ac2c219724e3da7722fdbe63e01e6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/e401d7e83e384e39b383896b700fec2e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/47cbc05b10904ec8b465053d6fc9b253, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/469eb0b117c841e6b063d62fb62a5593, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4e8d909105da4cc4855ae1670affb942, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1fcdb9b8e18347319e3cf748c84b4e2c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2570f0b22e15453cac97d9906f808f88] to archive 2024-12-06T08:20:03,442 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:20:03,444 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1f566ef2322a4c0ab9b95ed5d8995842 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1f566ef2322a4c0ab9b95ed5d8995842 2024-12-06T08:20:03,445 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/772f33b1b9264136a2d54c136307cf99 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/772f33b1b9264136a2d54c136307cf99 2024-12-06T08:20:03,446 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5ab722c5653e42e0aa4d347c604a01eb to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5ab722c5653e42e0aa4d347c604a01eb 2024-12-06T08:20:03,447 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/6e8836bcdfd8426cb8d0bc8a4e07ec88 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/6e8836bcdfd8426cb8d0bc8a4e07ec88 2024-12-06T08:20:03,448 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f6decd799720485ea36e98540a839648 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f6decd799720485ea36e98540a839648 2024-12-06T08:20:03,449 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bccc054396b4cef978bb8faf3997662 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bccc054396b4cef978bb8faf3997662 2024-12-06T08:20:03,451 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bd8614236e14fa0a92486217567a1dc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4bd8614236e14fa0a92486217567a1dc 2024-12-06T08:20:03,452 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1e37ad58156f430bb2411e290dbc46b0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1e37ad58156f430bb2411e290dbc46b0 2024-12-06T08:20:03,453 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/34fe811edc044ca4adb6eabdeb1a7859 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/34fe811edc044ca4adb6eabdeb1a7859 2024-12-06T08:20:03,454 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5cf712284811486e89e1f411adf94ad2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/5cf712284811486e89e1f411adf94ad2 2024-12-06T08:20:03,455 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/742901966e414088922dda8a4aaa0b04 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/742901966e414088922dda8a4aaa0b04 2024-12-06T08:20:03,456 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df 2024-12-06T08:20:03,457 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/7e9e38d6758545b699e9f7bf13ad58c9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/7e9e38d6758545b699e9f7bf13ad58c9 2024-12-06T08:20:03,458 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f0816ac82cfe4ed7b0e485782c56843c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f0816ac82cfe4ed7b0e485782c56843c 2024-12-06T08:20:03,459 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/dc6a495da7da4117a16e7a8db0938a54 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/dc6a495da7da4117a16e7a8db0938a54 2024-12-06T08:20:03,460 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/92c43084adca448c982bf2b45f96e9a1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/92c43084adca448c982bf2b45f96e9a1 2024-12-06T08:20:03,462 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ee725fa5048247f3a0ff16b90436f678 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ee725fa5048247f3a0ff16b90436f678 2024-12-06T08:20:03,463 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d3d5534abf5b4d5bb68155eef1d1461a to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d3d5534abf5b4d5bb68155eef1d1461a 2024-12-06T08:20:03,464 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/cedb11d11ff044fd857b3e2401f8b003 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/cedb11d11ff044fd857b3e2401f8b003 2024-12-06T08:20:03,465 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/9aaaebbec4e6444f89a9a7f82a37261f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/9aaaebbec4e6444f89a9a7f82a37261f 2024-12-06T08:20:03,466 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/3f79ecdd82d94314b87ec8ad7fcf61ac to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/3f79ecdd82d94314b87ec8ad7fcf61ac 2024-12-06T08:20:03,467 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ff458ff9e3f94733955b7839c2d7547f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/ff458ff9e3f94733955b7839c2d7547f 2024-12-06T08:20:03,468 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2c42842cee7b4622b214a098005338e3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2c42842cee7b4622b214a098005338e3 2024-12-06T08:20:03,469 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f630f3663b6c4644b31886bd77e2b4ed to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/f630f3663b6c4644b31886bd77e2b4ed 2024-12-06T08:20:03,470 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d115dae5eee242d0adf6c85a08169126 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d115dae5eee242d0adf6c85a08169126 2024-12-06T08:20:03,471 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d420871fce514e30a14be79b6035ef4b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/d420871fce514e30a14be79b6035ef4b 2024-12-06T08:20:03,472 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/637ac2c219724e3da7722fdbe63e01e6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/637ac2c219724e3da7722fdbe63e01e6 2024-12-06T08:20:03,473 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/e401d7e83e384e39b383896b700fec2e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/e401d7e83e384e39b383896b700fec2e 2024-12-06T08:20:03,475 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/47cbc05b10904ec8b465053d6fc9b253 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/47cbc05b10904ec8b465053d6fc9b253 2024-12-06T08:20:03,476 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/469eb0b117c841e6b063d62fb62a5593 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/469eb0b117c841e6b063d62fb62a5593 2024-12-06T08:20:03,477 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4e8d909105da4cc4855ae1670affb942 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4e8d909105da4cc4855ae1670affb942 2024-12-06T08:20:03,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1fcdb9b8e18347319e3cf748c84b4e2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/1fcdb9b8e18347319e3cf748c84b4e2c 2024-12-06T08:20:03,480 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2570f0b22e15453cac97d9906f808f88 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2570f0b22e15453cac97d9906f808f88 2024-12-06T08:20:03,481 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/43a01b8ec4dc4640a439150ae824d0db, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/62585c00485445bab2309c8ba366bd58, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/b354f828cd2440dea84b72e095d33801, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d9023e2e80104dd6a2f577ee9c6b4c93, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/570bc109770f47a69433d11fe0dcf2b4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fafae1f109784ed8b9ef64d557499757, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/1036540952144fa6a3acf41f1c02ab82, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/4ad4584412494429befb59ea0040dc2a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/2efa7f55835a4bc8b7cb759bd42ef431, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab53af954e5c4d879bfd5e55151c3b32, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/72cfbba84fec4c099446a1a6dccf9eae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/dbf124e1127f4750807677c1ff0ad843, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/5bb70f46bf11447d9fc32e2751de910d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/896eaa177ff34137a0ae2acacfda741b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/92f8fcfaee3342e481efa63c00274c4b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/7d8fb85603b44f58b4aea71354314e59, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/560ca0d1053f4c14a315810c9e41c834, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c22d3a6a1c34493bb777aef0301cc343, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/86548843db444c9eb4186321f2300155, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/28f65f363151429e85e2238dc07668e4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab053350fafb48b883ff61e9315590a5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/3ff713d530994686a27151ba51b8886b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/55ccef09352f4b0680634f00515e690e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/94ef09681f6a401fa16c23370c1e8eda, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/9b773bab7a214efa8de6310135a30a52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/8b3bc414a9aa41fe8b196471839fcae5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/efcf8fa2848a45a9a224663b445a1448, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d3e23c144c1640848bf74b005fab390c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/0776d4ebe93a4459b48046ccade8dcd2, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/56a1bda064424c8f9736a053c4b471ff, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c4e4a7e8df514cac9f4ffc27f95d6f7a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c7e887b5264040bca1a3fc83b071b193, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ed96adab5ef6403a9087b84c9856abd1] to archive 2024-12-06T08:20:03,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:20:03,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/43a01b8ec4dc4640a439150ae824d0db to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/43a01b8ec4dc4640a439150ae824d0db 2024-12-06T08:20:03,486 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/62585c00485445bab2309c8ba366bd58 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/62585c00485445bab2309c8ba366bd58 2024-12-06T08:20:03,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/b354f828cd2440dea84b72e095d33801 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/b354f828cd2440dea84b72e095d33801 2024-12-06T08:20:03,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d9023e2e80104dd6a2f577ee9c6b4c93 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d9023e2e80104dd6a2f577ee9c6b4c93 2024-12-06T08:20:03,489 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/570bc109770f47a69433d11fe0dcf2b4 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/570bc109770f47a69433d11fe0dcf2b4 2024-12-06T08:20:03,490 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fafae1f109784ed8b9ef64d557499757 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fafae1f109784ed8b9ef64d557499757 2024-12-06T08:20:03,491 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/1036540952144fa6a3acf41f1c02ab82 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/1036540952144fa6a3acf41f1c02ab82 2024-12-06T08:20:03,492 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/4ad4584412494429befb59ea0040dc2a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/4ad4584412494429befb59ea0040dc2a 2024-12-06T08:20:03,493 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/2efa7f55835a4bc8b7cb759bd42ef431 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/2efa7f55835a4bc8b7cb759bd42ef431 2024-12-06T08:20:03,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab53af954e5c4d879bfd5e55151c3b32 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab53af954e5c4d879bfd5e55151c3b32 2024-12-06T08:20:03,496 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/72cfbba84fec4c099446a1a6dccf9eae to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/72cfbba84fec4c099446a1a6dccf9eae 2024-12-06T08:20:03,497 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/dbf124e1127f4750807677c1ff0ad843 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/dbf124e1127f4750807677c1ff0ad843 2024-12-06T08:20:03,498 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/5bb70f46bf11447d9fc32e2751de910d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/5bb70f46bf11447d9fc32e2751de910d 2024-12-06T08:20:03,499 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/896eaa177ff34137a0ae2acacfda741b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/896eaa177ff34137a0ae2acacfda741b 2024-12-06T08:20:03,500 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/92f8fcfaee3342e481efa63c00274c4b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/92f8fcfaee3342e481efa63c00274c4b 2024-12-06T08:20:03,501 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/7d8fb85603b44f58b4aea71354314e59 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/7d8fb85603b44f58b4aea71354314e59 2024-12-06T08:20:03,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/560ca0d1053f4c14a315810c9e41c834 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/560ca0d1053f4c14a315810c9e41c834 2024-12-06T08:20:03,503 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c22d3a6a1c34493bb777aef0301cc343 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c22d3a6a1c34493bb777aef0301cc343 2024-12-06T08:20:03,504 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/86548843db444c9eb4186321f2300155 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/86548843db444c9eb4186321f2300155 2024-12-06T08:20:03,506 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/28f65f363151429e85e2238dc07668e4 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/28f65f363151429e85e2238dc07668e4 2024-12-06T08:20:03,507 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab053350fafb48b883ff61e9315590a5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ab053350fafb48b883ff61e9315590a5 2024-12-06T08:20:03,508 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/3ff713d530994686a27151ba51b8886b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/3ff713d530994686a27151ba51b8886b 2024-12-06T08:20:03,510 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/55ccef09352f4b0680634f00515e690e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/55ccef09352f4b0680634f00515e690e 2024-12-06T08:20:03,511 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/94ef09681f6a401fa16c23370c1e8eda to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/94ef09681f6a401fa16c23370c1e8eda 2024-12-06T08:20:03,512 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/9b773bab7a214efa8de6310135a30a52 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/9b773bab7a214efa8de6310135a30a52 2024-12-06T08:20:03,514 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/8b3bc414a9aa41fe8b196471839fcae5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/8b3bc414a9aa41fe8b196471839fcae5 2024-12-06T08:20:03,515 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/efcf8fa2848a45a9a224663b445a1448 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/efcf8fa2848a45a9a224663b445a1448 2024-12-06T08:20:03,516 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d3e23c144c1640848bf74b005fab390c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/d3e23c144c1640848bf74b005fab390c 2024-12-06T08:20:03,518 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/0776d4ebe93a4459b48046ccade8dcd2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/0776d4ebe93a4459b48046ccade8dcd2 2024-12-06T08:20:03,519 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/56a1bda064424c8f9736a053c4b471ff to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/56a1bda064424c8f9736a053c4b471ff 2024-12-06T08:20:03,521 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c4e4a7e8df514cac9f4ffc27f95d6f7a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c4e4a7e8df514cac9f4ffc27f95d6f7a 2024-12-06T08:20:03,522 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c7e887b5264040bca1a3fc83b071b193 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/c7e887b5264040bca1a3fc83b071b193 2024-12-06T08:20:03,523 DEBUG [StoreCloser-TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ed96adab5ef6403a9087b84c9856abd1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/ed96adab5ef6403a9087b84c9856abd1 2024-12-06T08:20:03,527 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/recovered.edits/496.seqid, newMaxSeqId=496, maxSeqId=1 2024-12-06T08:20:03,528 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d. 
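The StoreCloser/HFileArchiver records above show each compacted store file being moved from the region's data directory to the mirrored path under archive/ before the region closes. A minimal sketch of that data-to-archive path mapping, using the plain Hadoop FileSystem API rather than HBase's internal HFileArchiver; the class name, hard-coded root, and file name below are illustrative assumptions taken from the log, not the test's own code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: mirror a store file from .../data/... to .../archive/data/...
// the way the HFileArchiver log lines above describe. Not HBase's internal code.
public class ArchivePathSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical root; the log uses hdfs://localhost:43731/user/jenkins/test-data/...
        String root = "hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156";
        String relative = "default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/2e0013a500fc4aa299e770784c8638df";

        Path source = new Path(root + "/data/" + relative);         // live store file
        Path target = new Path(root + "/archive/data/" + relative); // mirrored archive location

        FileSystem fs = FileSystem.get(source.toUri(), conf);
        fs.mkdirs(target.getParent());             // ensure archive/<table>/<region>/<family> exists
        boolean moved = fs.rename(source, target); // archiving moves the file instead of deleting it
        System.out.println("archived=" + moved + " -> " + target);
    }
}
```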
2024-12-06T08:20:03,528 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 5310d6bf57bb8b709e1aec9222644a3d: 2024-12-06T08:20:03,529 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:20:03,530 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=5310d6bf57bb8b709e1aec9222644a3d, regionState=CLOSED 2024-12-06T08:20:03,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-06T08:20:03,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 5310d6bf57bb8b709e1aec9222644a3d, server=b6b797fc3981,38041,1733473111442 in 1.5240 sec 2024-12-06T08:20:03,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-06T08:20:03,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5310d6bf57bb8b709e1aec9222644a3d, UNASSIGN in 1.5260 sec 2024-12-06T08:20:03,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-06T08:20:03,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5280 sec 2024-12-06T08:20:03,536 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473203535"}]},"ts":"1733473203535"} 2024-12-06T08:20:03,536 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T08:20:03,538 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T08:20:03,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5400 sec 2024-12-06T08:20:04,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-06T08:20:04,105 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-06T08:20:04,106 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T08:20:04,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,107 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-06T08:20:04,108 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,109 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:20:04,111 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/recovered.edits] 2024-12-06T08:20:04,113 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/00623dfc5e814f6fbb8f65c9be4ceb0b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/00623dfc5e814f6fbb8f65c9be4ceb0b 2024-12-06T08:20:04,114 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9a7e3c1ee88549e08c94ce5179bc6db5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/9a7e3c1ee88549e08c94ce5179bc6db5 2024-12-06T08:20:04,115 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/fee4b9729f9c409d98bc263b85ad4981 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/A/fee4b9729f9c409d98bc263b85ad4981 2024-12-06T08:20:04,117 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/02f9ac82435c4fcc957c2fb70acb294f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/02f9ac82435c4fcc957c2fb70acb294f 2024-12-06T08:20:04,118 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/089369e9aeae49eca5cff2a30011c1ad to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/089369e9aeae49eca5cff2a30011c1ad 2024-12-06T08:20:04,119 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4ed43642490c4286985e1d17ea053b6d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/B/4ed43642490c4286985e1d17ea053b6d 2024-12-06T08:20:04,121 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/672ec47dc5004a8bac4e54b792984554 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/672ec47dc5004a8bac4e54b792984554 2024-12-06T08:20:04,122 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/da6442f703984deb9f9a3c4c212811be to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/da6442f703984deb9f9a3c4c212811be 2024-12-06T08:20:04,123 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fa9628195aba4d7bb0c1c9cfd3d19803 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/C/fa9628195aba4d7bb0c1c9cfd3d19803 2024-12-06T08:20:04,125 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/recovered.edits/496.seqid to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d/recovered.edits/496.seqid 2024-12-06T08:20:04,125 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/5310d6bf57bb8b709e1aec9222644a3d 2024-12-06T08:20:04,126 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T08:20:04,127 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,131 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T08:20:04,132 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-12-06T08:20:04,133 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,133 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T08:20:04,134 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733473204133"}]},"ts":"9223372036854775807"} 2024-12-06T08:20:04,135 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T08:20:04,135 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5310d6bf57bb8b709e1aec9222644a3d, NAME => 'TestAcidGuarantees,,1733473172154.5310d6bf57bb8b709e1aec9222644a3d.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T08:20:04,135 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-06T08:20:04,135 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733473204135"}]},"ts":"9223372036854775807"} 2024-12-06T08:20:04,137 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T08:20:04,139 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 33 msec 2024-12-06T08:20:04,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-06T08:20:04,209 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-12-06T08:20:04,221 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 242), OpenFileDescriptor=449 (was 465), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=415 (was 424), ProcessCount=11 (was 11), AvailableMemoryMB=7455 (was 7777) 2024-12-06T08:20:04,230 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=415, ProcessCount=11, AvailableMemoryMB=7455 2024-12-06T08:20:04,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
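The DISABLE (procId 93) and DELETE (procId 97) operations recorded above are what the client side of this sequence looks like when a table is dropped through the Admin API. A minimal client-side sketch, assuming a reachable cluster and the default client configuration; whether the test harness issues exactly these calls is an assumption:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative client-side view of the DISABLE -> DELETE sequence in the log above.
public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(table)) {
                admin.disableTable(table); // master runs DisableTableProcedure (pid=93 above)
                admin.deleteTable(table);  // master runs DeleteTableProcedure (pid=97 above)
            }
        }
    }
}
```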
2024-12-06T08:20:04,232 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:20:04,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:04,233 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:20:04,233 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:04,234 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:20:04,234 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-12-06T08:20:04,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T08:20:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742181_1357 (size=963) 2024-12-06T08:20:04,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T08:20:04,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T08:20:04,641 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:20:04,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742182_1358 (size=53) 2024-12-06T08:20:04,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T08:20:05,047 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:20:05,047 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 501ff14dc75101de2a84e5507808e766, disabling compactions & flushes 2024-12-06T08:20:05,047 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:05,047 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:05,047 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. after waiting 0 ms 2024-12-06T08:20:05,047 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:05,047 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
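The create request logged above sets the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three single-version families A, B, and C. A hedged sketch of building roughly that descriptor with the HBase 2.x client API; the attribute key and family settings are copied from the log, while the class and method structure below is only an illustrative guess at how such a table could be created, not the test's own setup code:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a descriptor equivalent to the one logged for the ADAPTIVE-policy table.
public class CreateAdaptiveTableSketch {
    static TableDescriptor buildDescriptor() {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level attribute seen in the log's TABLE_ATTRIBUTES metadata.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)        // VERSIONS => '1'
                .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                .build();
            builder.setColumnFamily(cf);
        }
        return builder.build();
    }

    static void create(Admin admin) throws java.io.IOException {
        admin.createTable(buildDescriptor()); // master runs CreateTableProcedure (pid=98 above)
    }
}
```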
2024-12-06T08:20:05,047 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:05,048 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:20:05,049 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733473205048"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473205048"}]},"ts":"1733473205048"} 2024-12-06T08:20:05,049 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:20:05,050 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:20:05,050 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473205050"}]},"ts":"1733473205050"} 2024-12-06T08:20:05,051 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T08:20:05,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, ASSIGN}] 2024-12-06T08:20:05,055 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, ASSIGN 2024-12-06T08:20:05,056 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:20:05,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:05,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:20:05,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T08:20:05,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:05,362 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:05,362 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:20:05,362 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,362 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:20:05,362 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,362 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,363 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,365 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:05,365 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 501ff14dc75101de2a84e5507808e766 columnFamilyName A 2024-12-06T08:20:05,365 DEBUG [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:05,365 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(327): Store=501ff14dc75101de2a84e5507808e766/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:05,365 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,366 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:05,366 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 501ff14dc75101de2a84e5507808e766 columnFamilyName B 2024-12-06T08:20:05,367 DEBUG [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:05,367 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(327): Store=501ff14dc75101de2a84e5507808e766/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:05,367 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,368 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:05,368 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 501ff14dc75101de2a84e5507808e766 columnFamilyName C 2024-12-06T08:20:05,368 DEBUG [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:05,368 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(327): Store=501ff14dc75101de2a84e5507808e766/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:05,368 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:05,369 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,369 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,371 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:20:05,371 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:05,373 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:20:05,373 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 501ff14dc75101de2a84e5507808e766; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62883871, jitterRate=-0.06295730173587799}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:20:05,374 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:05,375 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., pid=100, masterSystemTime=1733473205359 2024-12-06T08:20:05,376 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:05,376 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:05,376 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:05,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-06T08:20:05,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 in 170 msec 2024-12-06T08:20:05,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-06T08:20:05,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, ASSIGN in 323 msec 2024-12-06T08:20:05,380 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:20:05,380 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473205380"}]},"ts":"1733473205380"} 2024-12-06T08:20:05,381 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T08:20:05,383 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:20:05,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-06T08:20:06,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-06T08:20:06,339 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-06T08:20:06,341 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f5b2180 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34becda3 2024-12-06T08:20:06,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7f772a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:06,345 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:06,346 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:06,347 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:20:06,348 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48814, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:20:06,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T08:20:06,350 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:20:06,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:06,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742183_1359 (size=999) 2024-12-06T08:20:06,761 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-06T08:20:06,761 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-06T08:20:06,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:20:06,765 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, REOPEN/MOVE}] 2024-12-06T08:20:06,766 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, REOPEN/MOVE 2024-12-06T08:20:06,766 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:06,767 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:20:06,767 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:20:06,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:06,919 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:06,919 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:20:06,919 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 501ff14dc75101de2a84e5507808e766, disabling compactions & flushes 2024-12-06T08:20:06,919 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:06,919 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:06,919 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. after waiting 0 ms 2024-12-06T08:20:06,919 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:06,923 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-06T08:20:06,924 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:06,924 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:06,924 WARN [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 501ff14dc75101de2a84e5507808e766 to self. 2024-12-06T08:20:06,925 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:06,925 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=CLOSED 2024-12-06T08:20:06,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-06T08:20:06,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 in 159 msec 2024-12-06T08:20:06,928 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, REOPEN/MOVE; state=CLOSED, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=true 2024-12-06T08:20:07,078 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,079 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:20:07,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,233 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:07,233 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:20:07,234 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,234 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:20:07,234 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,234 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,235 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,236 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:07,236 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 501ff14dc75101de2a84e5507808e766 columnFamilyName A 2024-12-06T08:20:07,237 DEBUG [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:07,238 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(327): Store=501ff14dc75101de2a84e5507808e766/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:07,238 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,239 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:07,239 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 501ff14dc75101de2a84e5507808e766 columnFamilyName B 2024-12-06T08:20:07,239 DEBUG [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:07,239 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(327): Store=501ff14dc75101de2a84e5507808e766/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:07,239 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,240 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:07,240 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 501ff14dc75101de2a84e5507808e766 columnFamilyName C 2024-12-06T08:20:07,240 DEBUG [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:07,240 INFO [StoreOpener-501ff14dc75101de2a84e5507808e766-1 {}] regionserver.HStore(327): Store=501ff14dc75101de2a84e5507808e766/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:07,241 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,241 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,242 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,243 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:20:07,244 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,245 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 501ff14dc75101de2a84e5507808e766; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64053440, jitterRate=-0.04552936553955078}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:20:07,245 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:07,246 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., pid=105, masterSystemTime=1733473207231 2024-12-06T08:20:07,247 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,247 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:07,248 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=OPEN, openSeqNum=5, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-12-06T08:20:07,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 in 169 msec 2024-12-06T08:20:07,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-06T08:20:07,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, REOPEN/MOVE in 485 msec 2024-12-06T08:20:07,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-06T08:20:07,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 488 msec 2024-12-06T08:20:07,254 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 903 msec 2024-12-06T08:20:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-06T08:20:07,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df61dc9 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fe71801 2024-12-06T08:20:07,262 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf5e2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,263 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3637e4c6 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51f7d511 2024-12-06T08:20:07,265 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75b14fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,266 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f422b4 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc42ea6 2024-12-06T08:20:07,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f74604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,269 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf 
to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-12-06T08:20:07,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,272 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f472e0 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-12-06T08:20:07,275 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,275 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31aea41b to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-12-06T08:20:07,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,278 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-12-06T08:20:07,280 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,281 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2205f666 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27539bdc 2024-12-06T08:20:07,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c907e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-12-06T08:20:07,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,287 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-12-06T08:20:07,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:07,291 DEBUG [hconnection-0xfa08ff4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,291 DEBUG [hconnection-0x5a72223d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,292 DEBUG [hconnection-0x5f567b45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,293 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,293 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,293 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,293 DEBUG [hconnection-0x56b8c267-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,294 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,294 DEBUG [hconnection-0x406e448-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,295 DEBUG [hconnection-0x7cc3c4d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,295 DEBUG [hconnection-0x3c597578-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,295 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43286, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,295 DEBUG [hconnection-0x46ab931d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,295 DEBUG [hconnection-0x1d9df028-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,295 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,296 DEBUG [hconnection-0x6676e98a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:07,296 INFO 
[RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,296 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,297 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:07,297 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-06T08:20:07,298 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:07,299 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:07,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:07,300 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:07,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:07,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:07,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:07,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:07,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:07,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:07,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:07,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b3a638e193904b09b4f6955eefeeefcb_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473207301/Put/seqid=0 2024-12-06T08:20:07,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473267359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473267363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473267364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473267364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473267367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742184_1360 (size=12154) 2024-12-06T08:20:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:07,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:07,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:07,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473267468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473267467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473267468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473267468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473267468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:07,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
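The repeated RegionTooBusyException entries above report writes being rejected because the region's memstore has reached its blocking limit, shown here as 512.0 K. That limit is normally the per-region flush size multiplied by the block multiplier. The exact values this test run uses are not visible in the excerpt, so the snippet below is only an illustrative sketch: the property names are real HBase configuration keys, but the values are assumptions chosen to reproduce the 512 K figure seen in these entries.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: real HBase property names, assumed values.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush trigger (assumed 128 KB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block updates at 4x the flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // Once a region's memstore exceeds flushSize * multiplier, further mutations are
    // rejected with RegionTooBusyException ("Over memstore limit=...") until a flush
    // brings the memstore back under the limit.
    long blockingLimit = flushSize * multiplier;
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 524288 with these values
  }
}
```

With these assumed settings the blocking threshold works out to 4 x 128 KB = 512 KB, matching the limit reported in the warnings above.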
2024-12-06T08:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473267671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473267671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473267671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473267671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473267672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
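From the client's perspective, each rejected Mutate call above surfaces as a RegionTooBusyException, which is an IOException subtype. The stock HBase client already retries such failures internally according to its retry settings, so the sketch below is only a minimal illustration of what explicit handling with backoff would look like; the table, family, and row names are taken from this log, and everything else is an assumption.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of explicit backoff on RegionTooBusyException.
public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore has drained below its blocking limit
        } catch (RegionTooBusyException busy) {
          // The region server rejected the write because the region is over its
          // memstore blocking limit; back off and try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        } catch (IOException other) {
          throw other; // unrelated failure, do not retry blindly
        }
      }
    }
  }
}
```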
2024-12-06T08:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,771 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:07,775 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b3a638e193904b09b4f6955eefeeefcb_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b3a638e193904b09b4f6955eefeeefcb_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:07,776 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd70e21482364b1c80542454e2464f09, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:07,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd70e21482364b1c80542454e2464f09 is 175, key is test_row_0/A:col10/1733473207301/Put/seqid=0 2024-12-06T08:20:07,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742185_1361 (size=30955) 2024-12-06T08:20:07,782 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd70e21482364b1c80542454e2464f09 2024-12-06T08:20:07,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6ed94678c6514da7be0fd62a1783e7a3 is 50, key is test_row_0/B:col10/1733473207301/Put/seqid=0 2024-12-06T08:20:07,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742186_1362 (size=12001) 2024-12-06T08:20:07,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6ed94678c6514da7be0fd62a1783e7a3 2024-12-06T08:20:07,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/444a4a54da26475d9429f7f02ac888cd is 50, key is test_row_0/C:col10/1733473207301/Put/seqid=0 
2024-12-06T08:20:07,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742187_1363 (size=12001) 2024-12-06T08:20:07,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:07,911 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:07,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:07,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:07,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:07,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473267976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473267976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473267977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473267977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:07,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473267978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,063 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:08,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:08,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:08,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:08,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:08,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
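The pid=107 entries above show the master repeatedly dispatching a FlushRegionCallable that the region server rejects with "Unable to complete flush" while the region is already flushing, after which the dispatcher retries. That callable is the server-side half of a master-coordinated table flush; how this particular test triggers the flush is not visible in the excerpt, so the following is only a hedged sketch of issuing such a request through the public Admin API.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of requesting a table flush through the Admin API; whether the test does
// exactly this is an assumption.
public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log the master is
      // driving that work as a procedure (pid=106/107) dispatched to the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```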
2024-12-06T08:20:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:08,216 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:08,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:08,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
as already flushing 2024-12-06T08:20:08,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:08,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:08,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:08,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/444a4a54da26475d9429f7f02ac888cd 2024-12-06T08:20:08,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd70e21482364b1c80542454e2464f09 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09 2024-12-06T08:20:08,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09, entries=150, sequenceid=16, filesize=30.2 K 2024-12-06T08:20:08,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6ed94678c6514da7be0fd62a1783e7a3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6ed94678c6514da7be0fd62a1783e7a3 2024-12-06T08:20:08,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6ed94678c6514da7be0fd62a1783e7a3, entries=150, sequenceid=16, filesize=11.7 K 2024-12-06T08:20:08,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/444a4a54da26475d9429f7f02ac888cd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/444a4a54da26475d9429f7f02ac888cd 2024-12-06T08:20:08,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/444a4a54da26475d9429f7f02ac888cd, entries=150, sequenceid=16, filesize=11.7 K 2024-12-06T08:20:08,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 501ff14dc75101de2a84e5507808e766 in 930ms, sequenceid=16, compaction requested=false 2024-12-06T08:20:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-06T08:20:08,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:08,369 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-06T08:20:08,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
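The next flush attempt below picks up ~154 KB across the three column families, and while it runs the region starts rejecting writes with RegionTooBusyException ("Over memstore limit=512.0 K"). That blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, enforced in HRegion.checkResources(). The following is a minimal sketch of the two settings involved; the 128 KB / 4x values are assumptions chosen only to reproduce the 512 K figure seen in this run, not values read from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold: once a region's memstores exceed this,
        // a flush is requested ("Flush requested on ..." entries above).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // assumed value
        // Blocking limit = flush.size * block.multiplier; writes beyond it are
        // rejected with RegionTooBusyException ("Over memstore limit=512.0 K").
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4); // assumed value
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore size: " + blocking + " bytes"); // 524288 = 512 K
    }
}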
2024-12-06T08:20:08,369 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T08:20:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:08,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:08,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060cf9f71e279e43e884e6e7fa1434c26e_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473207362/Put/seqid=0 2024-12-06T08:20:08,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742188_1364 (size=12154) 2024-12-06T08:20:08,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:08,389 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412060cf9f71e279e43e884e6e7fa1434c26e_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060cf9f71e279e43e884e6e7fa1434c26e_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/116d61d9b5644d498f7f369cc618fce7, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:08,390 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/116d61d9b5644d498f7f369cc618fce7 is 175, key is test_row_0/A:col10/1733473207362/Put/seqid=0 2024-12-06T08:20:08,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742189_1365 (size=30955) 2024-12-06T08:20:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:08,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:08,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:08,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473268485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473268485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473268487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473268487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473268489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473268590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473268591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473268591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473268593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,797 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/116d61d9b5644d498f7f369cc618fce7 2024-12-06T08:20:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473268794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473268794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473268795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473268798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:08,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/a7803184b2ad4d2aa96a42081d8d1529 is 50, key is test_row_0/B:col10/1733473207362/Put/seqid=0 2024-12-06T08:20:08,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742190_1366 (size=12001) 2024-12-06T08:20:09,059 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T08:20:09,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473269100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473269100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473269101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473269103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,209 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/a7803184b2ad4d2aa96a42081d8d1529 2024-12-06T08:20:09,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/479fbe739fb7412b844b65ca6dd550c0 is 50, key is test_row_0/C:col10/1733473207362/Put/seqid=0 2024-12-06T08:20:09,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742191_1367 (size=12001) 2024-12-06T08:20:09,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:09,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473269490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473269604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473269605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473269606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:09,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473269606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:09,621 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/479fbe739fb7412b844b65ca6dd550c0 2024-12-06T08:20:09,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/116d61d9b5644d498f7f369cc618fce7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7 2024-12-06T08:20:09,629 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7, entries=150, sequenceid=42, filesize=30.2 K 2024-12-06T08:20:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/a7803184b2ad4d2aa96a42081d8d1529 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a7803184b2ad4d2aa96a42081d8d1529 2024-12-06T08:20:09,637 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a7803184b2ad4d2aa96a42081d8d1529, entries=150, sequenceid=42, filesize=11.7 K 2024-12-06T08:20:09,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/479fbe739fb7412b844b65ca6dd550c0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/479fbe739fb7412b844b65ca6dd550c0 2024-12-06T08:20:09,647 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/479fbe739fb7412b844b65ca6dd550c0, entries=150, sequenceid=42, filesize=11.7 K 2024-12-06T08:20:09,649 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 501ff14dc75101de2a84e5507808e766 in 1280ms, sequenceid=42, compaction requested=false 2024-12-06T08:20:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-06T08:20:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-06T08:20:09,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-06T08:20:09,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3510 sec 2024-12-06T08:20:09,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.3560 sec 2024-12-06T08:20:10,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:10,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:10,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:10,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:10,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:10,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:10,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:10,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:10,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206703ca792488b4f5b991f00ba358e276e_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:10,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742192_1368 (size=14594) 2024-12-06T08:20:10,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473270647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473270648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473270648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473270653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473270754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473270757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473270758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473270758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473270958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473270960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473270960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:10,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:10,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473270963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,040 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:11,043 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206703ca792488b4f5b991f00ba358e276e_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206703ca792488b4f5b991f00ba358e276e_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:11,044 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/14e1b615378d4b698bea7c98ba2c09cb, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:11,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/14e1b615378d4b698bea7c98ba2c09cb is 175, key is test_row_0/A:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:11,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742193_1369 (size=39549) 2024-12-06T08:20:11,050 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/14e1b615378d4b698bea7c98ba2c09cb 2024-12-06T08:20:11,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/100a8d5e493c4ac3ac37d1bb910c89d5 is 50, key is test_row_0/B:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:11,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742194_1370 
(size=12001) 2024-12-06T08:20:11,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/100a8d5e493c4ac3ac37d1bb910c89d5 2024-12-06T08:20:11,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/01ddcdf81a584ae1a72fb7324ccaa229 is 50, key is test_row_0/C:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:11,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742195_1371 (size=12001) 2024-12-06T08:20:11,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473271263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473271265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473271266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473271267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-06T08:20:11,404 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-06T08:20:11,405 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-06T08:20:11,407 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:11,407 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:11,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:11,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/01ddcdf81a584ae1a72fb7324ccaa229 2024-12-06T08:20:11,488 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/14e1b615378d4b698bea7c98ba2c09cb as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb 2024-12-06T08:20:11,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb, entries=200, sequenceid=53, filesize=38.6 K 2024-12-06T08:20:11,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/100a8d5e493c4ac3ac37d1bb910c89d5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/100a8d5e493c4ac3ac37d1bb910c89d5 2024-12-06T08:20:11,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/100a8d5e493c4ac3ac37d1bb910c89d5, entries=150, sequenceid=53, filesize=11.7 K 2024-12-06T08:20:11,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/01ddcdf81a584ae1a72fb7324ccaa229 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/01ddcdf81a584ae1a72fb7324ccaa229 2024-12-06T08:20:11,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/01ddcdf81a584ae1a72fb7324ccaa229, entries=150, sequenceid=53, filesize=11.7 K 2024-12-06T08:20:11,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 501ff14dc75101de2a84e5507808e766 in 892ms, sequenceid=53, compaction requested=true 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:11,502 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:11,502 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:11,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:11,503 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:11,503 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/B is initiating minor compaction (all files) 2024-12-06T08:20:11,503 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/B in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,503 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6ed94678c6514da7be0fd62a1783e7a3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a7803184b2ad4d2aa96a42081d8d1529, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/100a8d5e493c4ac3ac37d1bb910c89d5] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=35.2 K 2024-12-06T08:20:11,504 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:11,504 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/A is initiating minor compaction (all files) 2024-12-06T08:20:11,504 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/A in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:11,504 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=99.1 K 2024-12-06T08:20:11,504 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,504 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb] 2024-12-06T08:20:11,504 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed94678c6514da7be0fd62a1783e7a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473207301 2024-12-06T08:20:11,505 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a7803184b2ad4d2aa96a42081d8d1529, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733473207362 2024-12-06T08:20:11,505 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd70e21482364b1c80542454e2464f09, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473207301 2024-12-06T08:20:11,505 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 100a8d5e493c4ac3ac37d1bb910c89d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733473208486 2024-12-06T08:20:11,506 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 116d61d9b5644d498f7f369cc618fce7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733473207362 2024-12-06T08:20:11,506 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14e1b615378d4b698bea7c98ba2c09cb, keycount=200, bloomtype=ROW, size=38.6 K, 
encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733473208486 2024-12-06T08:20:11,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:11,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:11,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T08:20:11,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:11,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:11,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:11,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:11,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:11,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:11,532 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:11,537 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#B#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:11,537 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/d79d4e5e7eae4457b995738a691731b9 is 50, key is test_row_0/B:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:11,541 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120644186efcf99c406da8e4f8dec90de059_501ff14dc75101de2a84e5507808e766 store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:11,543 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120644186efcf99c406da8e4f8dec90de059_501ff14dc75101de2a84e5507808e766, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:11,543 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120644186efcf99c406da8e4f8dec90de059_501ff14dc75101de2a84e5507808e766 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:11,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a4e91b404efc4bbc8ee07d6557b22733_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473210652/Put/seqid=0 2024-12-06T08:20:11,559 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:11,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:11,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473271559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742196_1372 (size=12104) 2024-12-06T08:20:11,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742197_1373 (size=4469) 2024-12-06T08:20:11,575 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#A#compaction#318 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:11,575 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/24da69407e4a48ed8450defb28d12029 is 175, key is test_row_0/A:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:11,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742198_1374 (size=14594) 2024-12-06T08:20:11,587 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:11,594 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206a4e91b404efc4bbc8ee07d6557b22733_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a4e91b404efc4bbc8ee07d6557b22733_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:11,595 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/0a69f860ada44d05857774bb452d2510, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:11,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/0a69f860ada44d05857774bb452d2510 is 175, key is test_row_0/A:col10/1733473210652/Put/seqid=0 2024-12-06T08:20:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742199_1375 (size=31058) 2024-12-06T08:20:11,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742200_1376 (size=39549) 2024-12-06T08:20:11,619 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/24da69407e4a48ed8450defb28d12029 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/24da69407e4a48ed8450defb28d12029 2024-12-06T08:20:11,624 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/A of 501ff14dc75101de2a84e5507808e766 into 24da69407e4a48ed8450defb28d12029(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:11,624 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:11,624 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/A, priority=13, startTime=1733473211502; duration=0sec 2024-12-06T08:20:11,624 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:11,624 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:A 2024-12-06T08:20:11,624 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:11,625 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:11,625 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/C is initiating minor compaction (all files) 2024-12-06T08:20:11,625 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/C in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:11,625 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/444a4a54da26475d9429f7f02ac888cd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/479fbe739fb7412b844b65ca6dd550c0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/01ddcdf81a584ae1a72fb7324ccaa229] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=35.2 K 2024-12-06T08:20:11,626 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 444a4a54da26475d9429f7f02ac888cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473207301 2024-12-06T08:20:11,626 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 479fbe739fb7412b844b65ca6dd550c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733473207362 2024-12-06T08:20:11,626 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01ddcdf81a584ae1a72fb7324ccaa229, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733473208486 2024-12-06T08:20:11,634 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#C#compaction#321 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:11,634 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/49cf8c20c9274a35baa23644ca0693f0 is 50, key is test_row_0/C:col10/1733473208488/Put/seqid=0 2024-12-06T08:20:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742201_1377 (size=12104) 2024-12-06T08:20:11,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473271665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:11,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:11,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:11,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:11,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473271772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473271774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473271774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473271774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:11,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:11,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:11,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:11,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:11,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473271870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:11,972 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/d79d4e5e7eae4457b995738a691731b9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/d79d4e5e7eae4457b995738a691731b9 2024-12-06T08:20:11,977 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/B of 501ff14dc75101de2a84e5507808e766 into d79d4e5e7eae4457b995738a691731b9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:11,977 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:11,977 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/B, priority=13, startTime=1733473211502; duration=0sec 2024-12-06T08:20:11,977 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:11,977 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:B 2024-12-06T08:20:12,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:12,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:12,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:12,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,019 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/0a69f860ada44d05857774bb452d2510 2024-12-06T08:20:12,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/1995c2c8054440d986e1caeb67910069 is 50, key is test_row_0/B:col10/1733473210652/Put/seqid=0 2024-12-06T08:20:12,044 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/49cf8c20c9274a35baa23644ca0693f0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/49cf8c20c9274a35baa23644ca0693f0 2024-12-06T08:20:12,050 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/C of 501ff14dc75101de2a84e5507808e766 into 49cf8c20c9274a35baa23644ca0693f0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:12,050 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:12,050 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/C, priority=13, startTime=1733473211502; duration=0sec 2024-12-06T08:20:12,050 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:12,050 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:C 2024-12-06T08:20:12,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742202_1378 (size=12001) 2024-12-06T08:20:12,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:12,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:12,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:12,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473272176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:12,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:12,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/1995c2c8054440d986e1caeb67910069 2024-12-06T08:20:12,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/122d212b18ba4f929ab6391610210947 is 50, key is test_row_0/C:col10/1733473210652/Put/seqid=0 2024-12-06T08:20:12,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742203_1379 (size=12001) 2024-12-06T08:20:12,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
as already flushing 2024-12-06T08:20:12,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:12,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:12,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:12,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473272682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
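The 512.0 K figure in these rejections is the region's blocking memstore size, i.e. the memstore flush size multiplied by the block multiplier; with the default multiplier of 4 it is consistent with a flush size of 128 K, presumably scaled down by the test harness. A small sketch of the two server-side settings involved follows; the values shown are assumptions, only the relationship between them matters.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the knobs behind the "Over memstore limit=512.0 K" messages:
// blocking limit = flush size * block multiplier.
public class MemStoreBackPressureConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed 128 K (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes are rejected once a region's memstore exceeds "
        + blockingLimit + " bytes (512 K, matching the limit in the log).");
  }
}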
2024-12-06T08:20:12,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:12,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473272779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
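The repeated pid=109 failures above ("Unable to complete flush ... as already flushing") are the master's FlushRegionProcedure being re-dispatched while the region server still has an earlier flush in progress; the procedure succeeds only once that flush finishes, after which pid=109 and its parent FlushTableProcedure (pid=108) are marked SUCCESS further down in this log. Below is a minimal sketch of the admin call that drives this kind of table flush; the call itself is assumed, as it is not shown in the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure with one FlushRegionProcedure per region and keeps
      // re-dispatching the remote callable until the region server can complete it.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}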
2024-12-06T08:20:12,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:12,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473272781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:12,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473272783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473272785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/122d212b18ba4f929ab6391610210947 2024-12-06T08:20:12,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/0a69f860ada44d05857774bb452d2510 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510 2024-12-06T08:20:12,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510, entries=200, sequenceid=79, filesize=38.6 K 2024-12-06T08:20:12,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/1995c2c8054440d986e1caeb67910069 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/1995c2c8054440d986e1caeb67910069 2024-12-06T08:20:12,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/1995c2c8054440d986e1caeb67910069, entries=150, sequenceid=79, filesize=11.7 K 2024-12-06T08:20:12,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/122d212b18ba4f929ab6391610210947 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/122d212b18ba4f929ab6391610210947 2024-12-06T08:20:12,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/122d212b18ba4f929ab6391610210947, entries=150, sequenceid=79, filesize=11.7 K 2024-12-06T08:20:12,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 501ff14dc75101de2a84e5507808e766 in 1368ms, sequenceid=79, compaction requested=false 2024-12-06T08:20:12,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:12,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:12,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-06T08:20:12,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:12,935 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-06T08:20:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:12,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c0acb49bd054407897e7db8da8fbda99_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473211540/Put/seqid=0 2024-12-06T08:20:12,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742204_1380 (size=12154) 2024-12-06T08:20:13,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:13,350 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c0acb49bd054407897e7db8da8fbda99_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c0acb49bd054407897e7db8da8fbda99_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:13,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/8555304b4aaa4475840b307a4f8b9c02, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:13,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/8555304b4aaa4475840b307a4f8b9c02 is 175, key is test_row_0/A:col10/1733473211540/Put/seqid=0 2024-12-06T08:20:13,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742205_1381 (size=30955) 2024-12-06T08:20:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:13,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
as already flushing 2024-12-06T08:20:13,757 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/8555304b4aaa4475840b307a4f8b9c02 2024-12-06T08:20:13,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e7ee40ac08f74a41ae4a8ab0358123af is 50, key is test_row_0/B:col10/1733473211540/Put/seqid=0 2024-12-06T08:20:13,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742206_1382 (size=12001) 2024-12-06T08:20:13,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473273805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:13,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473273909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473274117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,176 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e7ee40ac08f74a41ae4a8ab0358123af 2024-12-06T08:20:14,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/af1cbec945a649839f7daba1920bb376 is 50, key is test_row_0/C:col10/1733473211540/Put/seqid=0 2024-12-06T08:20:14,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742207_1383 (size=12001) 2024-12-06T08:20:14,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473274424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,588 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/af1cbec945a649839f7daba1920bb376 2024-12-06T08:20:14,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/8555304b4aaa4475840b307a4f8b9c02 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02 2024-12-06T08:20:14,606 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02, entries=150, sequenceid=92, filesize=30.2 K 2024-12-06T08:20:14,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e7ee40ac08f74a41ae4a8ab0358123af as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e7ee40ac08f74a41ae4a8ab0358123af 2024-12-06T08:20:14,613 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e7ee40ac08f74a41ae4a8ab0358123af, entries=150, sequenceid=92, filesize=11.7 K 2024-12-06T08:20:14,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/af1cbec945a649839f7daba1920bb376 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/af1cbec945a649839f7daba1920bb376 2024-12-06T08:20:14,620 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/af1cbec945a649839f7daba1920bb376, entries=150, sequenceid=92, filesize=11.7 K 2024-12-06T08:20:14,621 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 501ff14dc75101de2a84e5507808e766 in 1687ms, sequenceid=92, compaction requested=true 2024-12-06T08:20:14,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:14,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:14,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-06T08:20:14,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-06T08:20:14,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-06T08:20:14,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2170 sec 2024-12-06T08:20:14,628 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 3.2220 sec 2024-12-06T08:20:14,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:14,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-06T08:20:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:14,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120664fa9eaef0de42fc986751b2c7fd6d46_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:14,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473274806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473274807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473274808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473274808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742208_1384 (size=14594) 2024-12-06T08:20:14,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473274912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473274915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473274916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473274916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:14,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473274935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473275117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473275119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473275122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473275123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,217 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:15,220 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120664fa9eaef0de42fc986751b2c7fd6d46_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120664fa9eaef0de42fc986751b2c7fd6d46_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:15,223 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/83434e26683444a29203d704473e1aaf, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:15,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/83434e26683444a29203d704473e1aaf is 175, key is test_row_0/A:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:15,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742209_1385 (size=39549) 2024-12-06T08:20:15,230 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/83434e26683444a29203d704473e1aaf 2024-12-06T08:20:15,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e30943e090434fad9ac93bb63cb7d0a6 is 50, key is test_row_0/B:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:15,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742210_1386 
(size=12001) 2024-12-06T08:20:15,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e30943e090434fad9ac93bb63cb7d0a6 2024-12-06T08:20:15,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/bd8bf54e79244ed68a638ecff1b4ccc5 is 50, key is test_row_0/C:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:15,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742211_1387 (size=12001) 2024-12-06T08:20:15,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/bd8bf54e79244ed68a638ecff1b4ccc5 2024-12-06T08:20:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/83434e26683444a29203d704473e1aaf as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf 2024-12-06T08:20:15,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf, entries=200, sequenceid=119, filesize=38.6 K 2024-12-06T08:20:15,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e30943e090434fad9ac93bb63cb7d0a6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e30943e090434fad9ac93bb63cb7d0a6 2024-12-06T08:20:15,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e30943e090434fad9ac93bb63cb7d0a6, entries=150, sequenceid=119, filesize=11.7 K 2024-12-06T08:20:15,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/bd8bf54e79244ed68a638ecff1b4ccc5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/bd8bf54e79244ed68a638ecff1b4ccc5 2024-12-06T08:20:15,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/bd8bf54e79244ed68a638ecff1b4ccc5, entries=150, sequenceid=119, filesize=11.7 K 2024-12-06T08:20:15,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 501ff14dc75101de2a84e5507808e766 in 541ms, sequenceid=119, compaction requested=true 2024-12-06T08:20:15,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:15,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:15,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:15,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:15,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:15,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:15,339 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:15,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:15,339 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141111 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/B is initiating minor compaction (all files) 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/A is initiating minor compaction (all files) 2024-12-06T08:20:15,340 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/B in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
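The repeated RegionTooBusyException warnings above are the region server refusing writes while the region's memstore sits above its blocking limit, which is the configured memstore flush size multiplied by the block multiplier; the 512.0 K figure reflects the deliberately small flush size this test uses so that flushes and write blocking are easy to provoke. Below is a minimal client-side sketch naming the two knobs involved and showing how a writer might handle the rejection; the table, family, and row names come from the log, but the sizes and retry settings are illustrative assumptions, not values read from the test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBlockingSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit = flush.size * block.multiplier. These are server/table-side
        // settings (normally hbase-site.xml or the table descriptor); they are shown
        // here only to name the keys. Values are assumptions, not the test's values.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // -> 512 K blocking limit
        // The client retries RegionTooBusyException on its own, governed by:
        conf.setInt("hbase.client.retries.number", 10);
        conf.setLong("hbase.client.pause", 100);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            table.put(put);
          } catch (IOException e) {
            // If client-side retries are exhausted, the server's RegionTooBusyException
            // surfaces here (possibly wrapped); backing off until the flush above
            // completes is the usual response.
            System.err.println("Write rejected, region over memstore limit: " + e);
          }
        }
      }
    }
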
2024-12-06T08:20:15,340 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/A in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,340 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/d79d4e5e7eae4457b995738a691731b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/1995c2c8054440d986e1caeb67910069, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e7ee40ac08f74a41ae4a8ab0358123af, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e30943e090434fad9ac93bb63cb7d0a6] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=47.0 K 2024-12-06T08:20:15,340 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/24da69407e4a48ed8450defb28d12029, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=137.8 K 2024-12-06T08:20:15,340 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/24da69407e4a48ed8450defb28d12029, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf] 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d79d4e5e7eae4457b995738a691731b9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733473208486 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24da69407e4a48ed8450defb28d12029, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733473208486 2024-12-06T08:20:15,340 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1995c2c8054440d986e1caeb67910069, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473210646 2024-12-06T08:20:15,341 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a69f860ada44d05857774bb452d2510, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473210646 2024-12-06T08:20:15,341 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e7ee40ac08f74a41ae4a8ab0358123af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733473211540 2024-12-06T08:20:15,341 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e30943e090434fad9ac93bb63cb7d0a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473213779 2024-12-06T08:20:15,341 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8555304b4aaa4475840b307a4f8b9c02, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733473211540 2024-12-06T08:20:15,341 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83434e26683444a29203d704473e1aaf, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473213779 2024-12-06T08:20:15,358 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#B#compaction#330 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:15,359 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/5e4fab9226144a12b288eb9e350540fa is 50, key is test_row_0/B:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:15,362 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:15,380 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120662633c4acb9d4e25957f55e8a0b3fb13_501ff14dc75101de2a84e5507808e766 store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:15,383 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120662633c4acb9d4e25957f55e8a0b3fb13_501ff14dc75101de2a84e5507808e766, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:15,384 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120662633c4acb9d4e25957f55e8a0b3fb13_501ff14dc75101de2a84e5507808e766 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:15,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742212_1388 (size=12241) 2024-12-06T08:20:15,403 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/5e4fab9226144a12b288eb9e350540fa as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/5e4fab9226144a12b288eb9e350540fa 2024-12-06T08:20:15,408 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/B of 501ff14dc75101de2a84e5507808e766 into 5e4fab9226144a12b288eb9e350540fa(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
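The "Selecting compaction from 4 store files" and "Exploring compaction algorithm has selected 4 files" lines above show the default ExploringCompactionPolicy deciding, immediately after the flush, that each store now holds enough HFiles to compact, which is also why the flush entries end with "compaction requested=true". The sketch below only names the configuration keys behind that decision and the equivalent explicit request; the numeric values are stock defaults given for illustration, not settings read from this test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionSelectionSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // A store becomes eligible for a minor compaction once it holds at least this
        // many HFiles (default 3); the four flushed files per store above cross it.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files a single minor compaction may rewrite.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Files-per-store count at which further flushes are delayed ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The region server queues these compactions itself after the flush, but the
          // same work can be requested explicitly:
          admin.compact(TableName.valueOf("TestAcidGuarantees"));       // minor compaction
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));  // rewrite all files
        }
      }
    }
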
2024-12-06T08:20:15,409 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:15,409 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/B, priority=12, startTime=1733473215338; duration=0sec 2024-12-06T08:20:15,409 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:15,409 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:B 2024-12-06T08:20:15,409 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:15,410 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:15,410 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/C is initiating minor compaction (all files) 2024-12-06T08:20:15,411 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/C in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,411 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/49cf8c20c9274a35baa23644ca0693f0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/122d212b18ba4f929ab6391610210947, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/af1cbec945a649839f7daba1920bb376, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/bd8bf54e79244ed68a638ecff1b4ccc5] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=47.0 K 2024-12-06T08:20:15,411 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 49cf8c20c9274a35baa23644ca0693f0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733473208486 2024-12-06T08:20:15,412 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 122d212b18ba4f929ab6391610210947, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733473210646 2024-12-06T08:20:15,412 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting af1cbec945a649839f7daba1920bb376, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=92, earliestPutTs=1733473211540 2024-12-06T08:20:15,413 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting bd8bf54e79244ed68a638ecff1b4ccc5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473213779 2024-12-06T08:20:15,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:15,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:15,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:15,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:15,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:15,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:15,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:15,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:15,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742213_1389 (size=4469) 2024-12-06T08:20:15,442 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#A#compaction#331 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:15,442 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/1df55170e37340bb86a5489dd8433062 is 175, key is test_row_0/A:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:15,447 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#C#compaction#332 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:15,448 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/985ff13d26054015b4247fb2166198fb is 50, key is test_row_0/C:col10/1733473214795/Put/seqid=0 2024-12-06T08:20:15,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206960b90cad02646bd8b45ffd9154682f9_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473215430/Put/seqid=0 2024-12-06T08:20:15,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742214_1390 (size=31195) 2024-12-06T08:20:15,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742216_1392 (size=12254) 2024-12-06T08:20:15,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:15,466 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/1df55170e37340bb86a5489dd8433062 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1df55170e37340bb86a5489dd8433062 2024-12-06T08:20:15,471 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/A of 501ff14dc75101de2a84e5507808e766 into 1df55170e37340bb86a5489dd8433062(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
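Column family A in this table is MOB-enabled, which is why its flushes and compactions above run through DefaultMobStoreFlusher and DefaultMobStoreCompactor and why files are renamed under mobdir, while B and C flush through the plain DefaultStoreFlusher. A hedged sketch of declaring such a table follows; the family names A, B, and C match the log, but the MOB threshold is an assumed illustrative value, not one taken from the test source.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // MOB-enabled family: cells larger than the threshold are written under mobdir
          // and handled by the MOB-aware flusher and compactor seen in the log.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L)  // assumed threshold; the test's actual value is not shown here
              .build());
          // Plain families flush through DefaultStoreFlusher, as in the B and C entries above.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }
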
2024-12-06T08:20:15,471 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:15,471 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/A, priority=12, startTime=1733473215338; duration=0sec 2024-12-06T08:20:15,471 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:15,471 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:A 2024-12-06T08:20:15,492 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206960b90cad02646bd8b45ffd9154682f9_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206960b90cad02646bd8b45ffd9154682f9_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:15,494 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/1f6fe4c32d9341fc9500923987a83f03, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:15,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/1f6fe4c32d9341fc9500923987a83f03 is 175, key is test_row_0/A:col10/1733473215430/Put/seqid=0 2024-12-06T08:20:15,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742215_1391 (size=12241) 2024-12-06T08:20:15,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742217_1393 (size=31055) 2024-12-06T08:20:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-06T08:20:15,512 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-06T08:20:15,513 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/985ff13d26054015b4247fb2166198fb as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/985ff13d26054015b4247fb2166198fb 2024-12-06T08:20:15,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-12-06T08:20:15,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-06T08:20:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T08:20:15,515 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:15,516 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:15,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:15,535 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/C of 501ff14dc75101de2a84e5507808e766 into 985ff13d26054015b4247fb2166198fb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:15,535 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:15,535 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/C, priority=12, startTime=1733473215339; duration=0sec 2024-12-06T08:20:15,535 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:15,535 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:C 2024-12-06T08:20:15,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473275488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473275536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473275536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473275536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T08:20:15,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473275637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473275646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473275646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473275646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T08:20:15,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:15,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T08:20:15,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T08:20:15,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:15,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473275843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473275851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473275852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473275852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,908 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/1f6fe4c32d9341fc9500923987a83f03 2024-12-06T08:20:15,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6fd0bb94ba8f408d87438e07e699c1ae is 50, key is test_row_0/B:col10/1733473215430/Put/seqid=0 2024-12-06T08:20:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742218_1394 (size=12101) 2024-12-06T08:20:15,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6fd0bb94ba8f408d87438e07e699c1ae 2024-12-06T08:20:15,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/798c43ffc3c5475590424d45a8fb939a is 50, key is test_row_0/C:col10/1733473215430/Put/seqid=0 2024-12-06T08:20:15,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:15,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473275941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742219_1395 (size=12101) 2024-12-06T08:20:15,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/798c43ffc3c5475590424d45a8fb939a 2024-12-06T08:20:15,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/1f6fe4c32d9341fc9500923987a83f03 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03 2024-12-06T08:20:15,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03, entries=150, sequenceid=132, filesize=30.3 K 2024-12-06T08:20:15,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6fd0bb94ba8f408d87438e07e699c1ae as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6fd0bb94ba8f408d87438e07e699c1ae 2024-12-06T08:20:15,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:15,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T08:20:15,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:15,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:15,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:15,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:15,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:15,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:15,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6fd0bb94ba8f408d87438e07e699c1ae, entries=150, sequenceid=132, filesize=11.8 K 2024-12-06T08:20:15,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/798c43ffc3c5475590424d45a8fb939a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/798c43ffc3c5475590424d45a8fb939a 2024-12-06T08:20:15,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/798c43ffc3c5475590424d45a8fb939a, entries=150, sequenceid=132, filesize=11.8 K 2024-12-06T08:20:15,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 501ff14dc75101de2a84e5507808e766 in 554ms, sequenceid=132, compaction requested=false 2024-12-06T08:20:15,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:16,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T08:20:16,127 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:16,128 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:16,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063defb7d075154e72a3111a6635e5bf46_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473215478/Put/seqid=0 2024-12-06T08:20:16,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742220_1396 (size=12304) 2024-12-06T08:20:16,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:16,149 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063defb7d075154e72a3111a6635e5bf46_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063defb7d075154e72a3111a6635e5bf46_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:16,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/4a87ec42663b4db2803b951e9e77a33c, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:16,151 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/4a87ec42663b4db2803b951e9e77a33c is 175, key is test_row_0/A:col10/1733473215478/Put/seqid=0 2024-12-06T08:20:16,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:16,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:16,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742221_1397 (size=31105) 2024-12-06T08:20:16,162 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/4a87ec42663b4db2803b951e9e77a33c 2024-12-06T08:20:16,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/2332708f64b047a0966cd30ee92af466 is 50, key is test_row_0/B:col10/1733473215478/Put/seqid=0 2024-12-06T08:20:16,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742222_1398 (size=12151) 2024-12-06T08:20:16,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473276194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473276202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473276202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473276203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473276303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473276310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473276310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473276310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473276508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473276515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473276515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473276515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,573 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/2332708f64b047a0966cd30ee92af466 2024-12-06T08:20:16,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a48c8d4f20c7449ab046407d59bd6236 is 50, key is test_row_0/C:col10/1733473215478/Put/seqid=0 2024-12-06T08:20:16,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742223_1399 (size=12151) 2024-12-06T08:20:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T08:20:16,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473276813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473276817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473276820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:16,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473276820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:16,986 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a48c8d4f20c7449ab046407d59bd6236 2024-12-06T08:20:16,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/4a87ec42663b4db2803b951e9e77a33c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c 2024-12-06T08:20:16,993 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c, entries=150, sequenceid=159, filesize=30.4 K 2024-12-06T08:20:16,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/2332708f64b047a0966cd30ee92af466 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/2332708f64b047a0966cd30ee92af466 2024-12-06T08:20:16,997 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/2332708f64b047a0966cd30ee92af466, entries=150, sequenceid=159, filesize=11.9 K 2024-12-06T08:20:16,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a48c8d4f20c7449ab046407d59bd6236 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a48c8d4f20c7449ab046407d59bd6236 2024-12-06T08:20:17,002 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a48c8d4f20c7449ab046407d59bd6236, entries=150, sequenceid=159, filesize=11.9 K 2024-12-06T08:20:17,002 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 501ff14dc75101de2a84e5507808e766 in 874ms, sequenceid=159, compaction requested=true 2024-12-06T08:20:17,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:17,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:17,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-06T08:20:17,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-06T08:20:17,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-06T08:20:17,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4880 sec 2024-12-06T08:20:17,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.4920 sec 2024-12-06T08:20:17,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:17,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:20:17,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:17,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:17,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:17,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:17,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:17,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:17,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063299f0a7798e4ca8bdaa0f60cdb37bfc_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473216201/Put/seqid=0 2024-12-06T08:20:17,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742224_1400 (size=14794) 2024-12-06T08:20:17,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473277360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473277361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473277365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473277366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473277467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473277467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473277468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473277472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-06T08:20:17,619 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-06T08:20:17,620 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-06T08:20:17,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T08:20:17,622 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:17,622 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:17,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:17,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473277670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473277671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473277677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473277677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T08:20:17,736 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:17,740 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063299f0a7798e4ca8bdaa0f60cdb37bfc_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063299f0a7798e4ca8bdaa0f60cdb37bfc_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:17,741 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd8ca95a52674a3580b3a2a90dfdd52b, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:17,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd8ca95a52674a3580b3a2a90dfdd52b is 175, key is test_row_0/A:col10/1733473216201/Put/seqid=0 2024-12-06T08:20:17,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742225_1401 (size=39749) 2024-12-06T08:20:17,750 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd8ca95a52674a3580b3a2a90dfdd52b 2024-12-06T08:20:17,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/807858ef47a149afa62cc59308f4a7ca is 50, key is test_row_0/B:col10/1733473216201/Put/seqid=0 
2024-12-06T08:20:17,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742226_1402 (size=12151) 2024-12-06T08:20:17,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/807858ef47a149afa62cc59308f4a7ca 2024-12-06T08:20:17,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/6def75300c394c5bbf71fe4228357eb3 is 50, key is test_row_0/C:col10/1733473216201/Put/seqid=0 2024-12-06T08:20:17,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-06T08:20:17,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:17,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:17,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:17,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:17,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742227_1403 (size=12151) 2024-12-06T08:20:17,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/6def75300c394c5bbf71fe4228357eb3 2024-12-06T08:20:17,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/fd8ca95a52674a3580b3a2a90dfdd52b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b 2024-12-06T08:20:17,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b, entries=200, sequenceid=172, filesize=38.8 K 2024-12-06T08:20:17,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/807858ef47a149afa62cc59308f4a7ca as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/807858ef47a149afa62cc59308f4a7ca 2024-12-06T08:20:17,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/807858ef47a149afa62cc59308f4a7ca, entries=150, sequenceid=172, filesize=11.9 K 2024-12-06T08:20:17,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/6def75300c394c5bbf71fe4228357eb3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6def75300c394c5bbf71fe4228357eb3 2024-12-06T08:20:17,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6def75300c394c5bbf71fe4228357eb3, entries=150, sequenceid=172, filesize=11.9 K 2024-12-06T08:20:17,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 501ff14dc75101de2a84e5507808e766 in 481ms, sequenceid=172, compaction requested=true 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:17,801 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:17,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:17,801 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:17,803 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:17,803 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133104 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:17,804 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/A is initiating minor compaction (all files) 2024-12-06T08:20:17,804 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/B is initiating minor compaction (all files) 2024-12-06T08:20:17,804 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/A in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:17,804 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/B in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:17,804 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1df55170e37340bb86a5489dd8433062, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=130.0 K 2024-12-06T08:20:17,804 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/5e4fab9226144a12b288eb9e350540fa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6fd0bb94ba8f408d87438e07e699c1ae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/2332708f64b047a0966cd30ee92af466, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/807858ef47a149afa62cc59308f4a7ca] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=47.5 K 2024-12-06T08:20:17,804 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:17,804 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1df55170e37340bb86a5489dd8433062, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b] 2024-12-06T08:20:17,804 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e4fab9226144a12b288eb9e350540fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473213779 2024-12-06T08:20:17,804 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1df55170e37340bb86a5489dd8433062, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473213779 2024-12-06T08:20:17,805 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fd0bb94ba8f408d87438e07e699c1ae, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733473214807 2024-12-06T08:20:17,805 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f6fe4c32d9341fc9500923987a83f03, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733473214807 2024-12-06T08:20:17,805 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2332708f64b047a0966cd30ee92af466, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473215478 2024-12-06T08:20:17,805 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a87ec42663b4db2803b951e9e77a33c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473215478 2024-12-06T08:20:17,805 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 807858ef47a149afa62cc59308f4a7ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733473216201 2024-12-06T08:20:17,806 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd8ca95a52674a3580b3a2a90dfdd52b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733473216201 2024-12-06T08:20:17,816 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:17,819 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#B#compaction#342 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:17,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/20a8fc21190a42649f8fe166aa6e439d is 50, key is test_row_0/B:col10/1733473216201/Put/seqid=0 2024-12-06T08:20:17,828 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206bffd1cb39bc44acd96dbdfae5f3e4d7d_501ff14dc75101de2a84e5507808e766 store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:17,831 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206bffd1cb39bc44acd96dbdfae5f3e4d7d_501ff14dc75101de2a84e5507808e766, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:17,831 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206bffd1cb39bc44acd96dbdfae5f3e4d7d_501ff14dc75101de2a84e5507808e766 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:17,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742228_1404 (size=12527) 2024-12-06T08:20:17,850 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/20a8fc21190a42649f8fe166aa6e439d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20a8fc21190a42649f8fe166aa6e439d 2024-12-06T08:20:17,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742229_1405 (size=4469) 2024-12-06T08:20:17,854 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#A#compaction#343 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:17,855 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/cd869bc3e4ac46aca36d117a76f1180c is 175, key is test_row_0/A:col10/1733473216201/Put/seqid=0 2024-12-06T08:20:17,856 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/B of 501ff14dc75101de2a84e5507808e766 into 20a8fc21190a42649f8fe166aa6e439d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:17,856 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:17,857 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/B, priority=12, startTime=1733473217801; duration=0sec 2024-12-06T08:20:17,857 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:17,857 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:B 2024-12-06T08:20:17,857 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:17,860 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:17,860 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/C is initiating minor compaction (all files) 2024-12-06T08:20:17,860 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/C in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:17,860 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/985ff13d26054015b4247fb2166198fb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/798c43ffc3c5475590424d45a8fb939a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a48c8d4f20c7449ab046407d59bd6236, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6def75300c394c5bbf71fe4228357eb3] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=47.5 K 2024-12-06T08:20:17,860 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 985ff13d26054015b4247fb2166198fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473213779 2024-12-06T08:20:17,861 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 798c43ffc3c5475590424d45a8fb939a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733473214807 2024-12-06T08:20:17,861 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a48c8d4f20c7449ab046407d59bd6236, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=159, earliestPutTs=1733473215478 2024-12-06T08:20:17,862 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6def75300c394c5bbf71fe4228357eb3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733473216201 2024-12-06T08:20:17,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742230_1406 (size=31481) 2024-12-06T08:20:17,877 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/cd869bc3e4ac46aca36d117a76f1180c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd869bc3e4ac46aca36d117a76f1180c 2024-12-06T08:20:17,880 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#C#compaction#344 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:17,881 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/cdace0d5ae8347998bbf6267fcb3a39f is 50, key is test_row_0/C:col10/1733473216201/Put/seqid=0 2024-12-06T08:20:17,886 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/A of 501ff14dc75101de2a84e5507808e766 into cd869bc3e4ac46aca36d117a76f1180c(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:17,886 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:17,886 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/A, priority=12, startTime=1733473217801; duration=0sec 2024-12-06T08:20:17,886 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:17,886 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:A 2024-12-06T08:20:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742231_1407 (size=12527) 2024-12-06T08:20:17,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T08:20:17,927 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:17,928 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:17,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206933ba1f99b34482eb5a36b03858966d4_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473217360/Put/seqid=0 2024-12-06T08:20:17,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742232_1408 (size=12304) 2024-12-06T08:20:17,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:17,943 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206933ba1f99b34482eb5a36b03858966d4_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206933ba1f99b34482eb5a36b03858966d4_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:17,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/e8563f0e2b7444e981392d3719d4c353, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:17,945 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/e8563f0e2b7444e981392d3719d4c353 is 175, key is test_row_0/A:col10/1733473217360/Put/seqid=0 2024-12-06T08:20:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742233_1409 (size=31105) 2024-12-06T08:20:17,950 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/e8563f0e2b7444e981392d3719d4c353 2024-12-06T08:20:17,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:17,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:17,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/9a95cc5935284897ade0234967fd58db is 50, key is test_row_0/B:col10/1733473217360/Put/seqid=0 2024-12-06T08:20:17,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742234_1410 (size=12151) 2024-12-06T08:20:17,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473277985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473277985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473277985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473277991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473277991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473278093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473278097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473278098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T08:20:18,296 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/cdace0d5ae8347998bbf6267fcb3a39f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/cdace0d5ae8347998bbf6267fcb3a39f 2024-12-06T08:20:18,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473278296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,300 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/C of 501ff14dc75101de2a84e5507808e766 into cdace0d5ae8347998bbf6267fcb3a39f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:18,300 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:18,300 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/C, priority=12, startTime=1733473217801; duration=0sec 2024-12-06T08:20:18,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473278300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,300 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:18,301 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:C 2024-12-06T08:20:18,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473278302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,369 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/9a95cc5935284897ade0234967fd58db 2024-12-06T08:20:18,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/ed8f65abe8f84e0898d0e1759856a585 is 50, key is test_row_0/C:col10/1733473217360/Put/seqid=0 2024-12-06T08:20:18,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742235_1411 (size=12151) 2024-12-06T08:20:18,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473278494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473278496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473278601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473278602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473278604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:18,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T08:20:18,780 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/ed8f65abe8f84e0898d0e1759856a585 2024-12-06T08:20:18,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/e8563f0e2b7444e981392d3719d4c353 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353 2024-12-06T08:20:18,789 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353, entries=150, sequenceid=197, filesize=30.4 K 2024-12-06T08:20:18,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/9a95cc5935284897ade0234967fd58db as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9a95cc5935284897ade0234967fd58db 2024-12-06T08:20:18,795 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9a95cc5935284897ade0234967fd58db, entries=150, sequenceid=197, filesize=11.9 K 2024-12-06T08:20:18,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/ed8f65abe8f84e0898d0e1759856a585 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ed8f65abe8f84e0898d0e1759856a585 2024-12-06T08:20:18,800 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ed8f65abe8f84e0898d0e1759856a585, entries=150, sequenceid=197, filesize=11.9 K 2024-12-06T08:20:18,801 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 501ff14dc75101de2a84e5507808e766 in 873ms, sequenceid=197, compaction requested=false 2024-12-06T08:20:18,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:18,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:18,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-06T08:20:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-06T08:20:18,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-06T08:20:18,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1790 sec 2024-12-06T08:20:18,805 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.1840 sec 2024-12-06T08:20:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:19,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:20:19,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:19,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:19,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:19,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:19,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:19,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:19,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067652b274eb66445fad2a5df3b922107a_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742236_1412 (size=14794) 2024-12-06T08:20:19,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473279143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473279145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473279146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473279247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473279251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473279252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473279452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473279453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473279457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473279501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473279505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,541 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:19,544 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067652b274eb66445fad2a5df3b922107a_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067652b274eb66445fad2a5df3b922107a_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:19,545 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/b96b79efe5774bfa8af80e8a680ced13, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:19,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/b96b79efe5774bfa8af80e8a680ced13 is 175, key is test_row_0/A:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:19,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742237_1413 (size=39749) 2024-12-06T08:20:19,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-06T08:20:19,726 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-06T08:20:19,727 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:19,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-06T08:20:19,729 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:19,729 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:19,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:19,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473279757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473279759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:19,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473279762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:19,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:19,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:19,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:19,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:19,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:19,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:19,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:19,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:19,966 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/b96b79efe5774bfa8af80e8a680ced13 2024-12-06T08:20:19,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6f40ab9e7009477e8a0de834268d3cc5 is 50, key is test_row_0/B:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:19,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742238_1414 (size=12151) 2024-12-06T08:20:20,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:20,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,187 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:20,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:20,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473280262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:20,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473280264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:20,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473280267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:20,340 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6f40ab9e7009477e8a0de834268d3cc5 2024-12-06T08:20:20,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/6956e69b8ea945e1bb1a93062f1b2c29 is 50, key is test_row_0/C:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:20,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742239_1415 (size=12151) 2024-12-06T08:20:20,493 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
as already flushing 2024-12-06T08:20:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/6956e69b8ea945e1bb1a93062f1b2c29 2024-12-06T08:20:20,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:20,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/b96b79efe5774bfa8af80e8a680ced13 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13 2024-12-06T08:20:20,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:20,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:20,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13, entries=200, sequenceid=214, filesize=38.8 K 2024-12-06T08:20:20,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/6f40ab9e7009477e8a0de834268d3cc5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6f40ab9e7009477e8a0de834268d3cc5 2024-12-06T08:20:20,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6f40ab9e7009477e8a0de834268d3cc5, entries=150, sequenceid=214, filesize=11.9 K 2024-12-06T08:20:20,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/6956e69b8ea945e1bb1a93062f1b2c29 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6956e69b8ea945e1bb1a93062f1b2c29 2024-12-06T08:20:20,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6956e69b8ea945e1bb1a93062f1b2c29, entries=150, sequenceid=214, filesize=11.9 K 2024-12-06T08:20:20,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 501ff14dc75101de2a84e5507808e766 in 1707ms, sequenceid=214, compaction requested=true 2024-12-06T08:20:20,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:20,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:20,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:20,820 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:20,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:20,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:20,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:20,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:20,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:20,821 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:20,821 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102335 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:20,821 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/B is initiating minor compaction (all files) 2024-12-06T08:20:20,821 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/A is initiating minor compaction (all files) 2024-12-06T08:20:20,821 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/B in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,821 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/A in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:20,821 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20a8fc21190a42649f8fe166aa6e439d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9a95cc5935284897ade0234967fd58db, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6f40ab9e7009477e8a0de834268d3cc5] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=36.0 K 2024-12-06T08:20:20,821 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd869bc3e4ac46aca36d117a76f1180c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=99.9 K 2024-12-06T08:20:20,821 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:20,821 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd869bc3e4ac46aca36d117a76f1180c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13] 2024-12-06T08:20:20,822 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 20a8fc21190a42649f8fe166aa6e439d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733473216201 2024-12-06T08:20:20,822 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd869bc3e4ac46aca36d117a76f1180c, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733473216201 2024-12-06T08:20:20,822 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a95cc5935284897ade0234967fd58db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733473217350 2024-12-06T08:20:20,822 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f40ab9e7009477e8a0de834268d3cc5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733473217983 2024-12-06T08:20:20,822 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8563f0e2b7444e981392d3719d4c353, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733473217350 2024-12-06T08:20:20,823 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b96b79efe5774bfa8af80e8a680ced13, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733473217983 2024-12-06T08:20:20,831 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:20,831 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#B#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:20,832 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/76afbcf09f774272b1b5331be053bd17 is 50, key is test_row_0/B:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:20,834 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206df72e70b2b5a47ccb92dcd795bf29b04_501ff14dc75101de2a84e5507808e766 store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:20,836 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206df72e70b2b5a47ccb92dcd795bf29b04_501ff14dc75101de2a84e5507808e766, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:20,836 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206df72e70b2b5a47ccb92dcd795bf29b04_501ff14dc75101de2a84e5507808e766 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:20,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742240_1416 (size=12629) 2024-12-06T08:20:20,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742241_1417 (size=4469) 2024-12-06T08:20:20,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:20,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-06T08:20:20,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:20,960 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T08:20:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:20,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120640d2df47fbff4eb0a54e2f1e05d58199_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473219145/Put/seqid=0 2024-12-06T08:20:20,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742242_1418 (size=12304) 2024-12-06T08:20:21,248 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#A#compaction#352 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:21,249 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/2256a521dc074fa3a5e97d4a66b5005b is 175, key is test_row_0/A:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:21,252 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/76afbcf09f774272b1b5331be053bd17 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/76afbcf09f774272b1b5331be053bd17 2024-12-06T08:20:21,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742243_1419 (size=31583) 2024-12-06T08:20:21,262 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/B of 501ff14dc75101de2a84e5507808e766 into 76afbcf09f774272b1b5331be053bd17(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:21,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:21,262 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/B, priority=13, startTime=1733473220820; duration=0sec 2024-12-06T08:20:21,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:21,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:B 2024-12-06T08:20:21,262 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:21,264 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/2256a521dc074fa3a5e97d4a66b5005b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/2256a521dc074fa3a5e97d4a66b5005b 2024-12-06T08:20:21,265 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:21,265 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/C is initiating minor compaction (all files) 2024-12-06T08:20:21,265 INFO 
[RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/C in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:21,265 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/cdace0d5ae8347998bbf6267fcb3a39f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ed8f65abe8f84e0898d0e1759856a585, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6956e69b8ea945e1bb1a93062f1b2c29] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=36.0 K 2024-12-06T08:20:21,266 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting cdace0d5ae8347998bbf6267fcb3a39f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733473216201 2024-12-06T08:20:21,266 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ed8f65abe8f84e0898d0e1759856a585, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733473217350 2024-12-06T08:20:21,267 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6956e69b8ea945e1bb1a93062f1b2c29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733473217983 2024-12-06T08:20:21,269 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/A of 501ff14dc75101de2a84e5507808e766 into 2256a521dc074fa3a5e97d4a66b5005b(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:21,269 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:21,269 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/A, priority=13, startTime=1733473220820; duration=0sec 2024-12-06T08:20:21,269 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:21,269 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:A 2024-12-06T08:20:21,274 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#C#compaction#354 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:21,275 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a82ae67d79254d5983260f86ea048aaa is 50, key is test_row_0/C:col10/1733473219112/Put/seqid=0 2024-12-06T08:20:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:21,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:21,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742244_1420 (size=12629) 2024-12-06T08:20:21,289 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a82ae67d79254d5983260f86ea048aaa as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a82ae67d79254d5983260f86ea048aaa 2024-12-06T08:20:21,295 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/C of 501ff14dc75101de2a84e5507808e766 into a82ae67d79254d5983260f86ea048aaa(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:21,295 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:21,295 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/C, priority=13, startTime=1733473220821; duration=0sec 2024-12-06T08:20:21,295 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:21,295 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:C 2024-12-06T08:20:21,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473281300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473281303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473281304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:21,375 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120640d2df47fbff4eb0a54e2f1e05d58199_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120640d2df47fbff4eb0a54e2f1e05d58199_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:21,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/ca35596a747b474c947246279ec2b976, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:21,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/ca35596a747b474c947246279ec2b976 is 175, key is test_row_0/A:col10/1733473219145/Put/seqid=0 2024-12-06T08:20:21,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742245_1421 (size=31105) 2024-12-06T08:20:21,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473281405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473281408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473281408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473281507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,511 DEBUG [Thread-1615 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:21,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473281514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,517 DEBUG [Thread-1619 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:21,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473281610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473281610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473281612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,781 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/ca35596a747b474c947246279ec2b976 2024-12-06T08:20:21,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/a135dc44a52244cdbf4895055127dcf9 is 50, key is test_row_0/B:col10/1733473219145/Put/seqid=0 2024-12-06T08:20:21,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742246_1422 (size=12151) 2024-12-06T08:20:21,793 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/a135dc44a52244cdbf4895055127dcf9 2024-12-06T08:20:21,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a0380c7837d5419d9398d3d5b4f3c370 is 50, key is test_row_0/C:col10/1733473219145/Put/seqid=0 2024-12-06T08:20:21,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742247_1423 (size=12151) 2024-12-06T08:20:21,812 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a0380c7837d5419d9398d3d5b4f3c370 2024-12-06T08:20:21,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/ca35596a747b474c947246279ec2b976 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976 2024-12-06T08:20:21,820 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976, entries=150, sequenceid=234, filesize=30.4 K 2024-12-06T08:20:21,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/a135dc44a52244cdbf4895055127dcf9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a135dc44a52244cdbf4895055127dcf9 2024-12-06T08:20:21,824 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a135dc44a52244cdbf4895055127dcf9, entries=150, sequenceid=234, filesize=11.9 K 2024-12-06T08:20:21,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a0380c7837d5419d9398d3d5b4f3c370 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a0380c7837d5419d9398d3d5b4f3c370 2024-12-06T08:20:21,829 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a0380c7837d5419d9398d3d5b4f3c370, entries=150, sequenceid=234, filesize=11.9 K 2024-12-06T08:20:21,830 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 501ff14dc75101de2a84e5507808e766 in 870ms, sequenceid=234, compaction requested=false 2024-12-06T08:20:21,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:21,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:21,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-06T08:20:21,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-06T08:20:21,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-06T08:20:21,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1020 sec 2024-12-06T08:20:21,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:21,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.1070 sec 2024-12-06T08:20:21,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:21,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-06T08:20:21,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:21,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:21,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:21,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:21,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:21,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:21,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c09581af6ca148de82f7892478f4d558_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:21,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742248_1424 (size=14794) 2024-12-06T08:20:21,931 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:21,935 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c09581af6ca148de82f7892478f4d558_501ff14dc75101de2a84e5507808e766 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c09581af6ca148de82f7892478f4d558_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:21,935 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/a416b63285b64730b1a6871273287931, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:21,936 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/a416b63285b64730b1a6871273287931 is 175, key is test_row_0/A:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:21,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742249_1425 (size=39749) 2024-12-06T08:20:21,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473281955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473281957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:21,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:21,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473281958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473282059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473282065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473282067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473282267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473282272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473282273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,340 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/a416b63285b64730b1a6871273287931 2024-12-06T08:20:22,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/37d9ad4225e04df5bd600d6b54ee4273 is 50, key is test_row_0/B:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742250_1426 (size=12151) 2024-12-06T08:20:22,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473282572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473282577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473282577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:22,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/37d9ad4225e04df5bd600d6b54ee4273 2024-12-06T08:20:22,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/60f1709c7f24404b90610d4e919c69a4 is 50, key is test_row_0/C:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:22,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742251_1427 (size=12151) 2024-12-06T08:20:22,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/60f1709c7f24404b90610d4e919c69a4 2024-12-06T08:20:22,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/a416b63285b64730b1a6871273287931 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931 2024-12-06T08:20:22,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931, entries=200, sequenceid=255, filesize=38.8 K 2024-12-06T08:20:22,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/37d9ad4225e04df5bd600d6b54ee4273 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/37d9ad4225e04df5bd600d6b54ee4273 2024-12-06T08:20:22,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/37d9ad4225e04df5bd600d6b54ee4273, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T08:20:22,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/60f1709c7f24404b90610d4e919c69a4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/60f1709c7f24404b90610d4e919c69a4 2024-12-06T08:20:22,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/60f1709c7f24404b90610d4e919c69a4, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T08:20:22,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 501ff14dc75101de2a84e5507808e766 in 859ms, sequenceid=255, compaction requested=true 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:22,779 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:22,779 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:22,780 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:22,780 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/A is initiating minor compaction (all files) 2024-12-06T08:20:22,780 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/A in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:22,780 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/2256a521dc074fa3a5e97d4a66b5005b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=100.0 K 2024-12-06T08:20:22,780 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:22,780 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/2256a521dc074fa3a5e97d4a66b5005b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931] 2024-12-06T08:20:22,780 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:22,781 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/B is initiating minor compaction (all files) 2024-12-06T08:20:22,781 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/B in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:22,781 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/76afbcf09f774272b1b5331be053bd17, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a135dc44a52244cdbf4895055127dcf9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/37d9ad4225e04df5bd600d6b54ee4273] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=36.1 K 2024-12-06T08:20:22,781 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 76afbcf09f774272b1b5331be053bd17, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733473217983 2024-12-06T08:20:22,781 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2256a521dc074fa3a5e97d4a66b5005b, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733473217983 2024-12-06T08:20:22,782 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a135dc44a52244cdbf4895055127dcf9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733473219142 2024-12-06T08:20:22,782 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca35596a747b474c947246279ec2b976, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733473219142 2024-12-06T08:20:22,782 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 37d9ad4225e04df5bd600d6b54ee4273, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473221297 2024-12-06T08:20:22,783 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a416b63285b64730b1a6871273287931, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473221297 2024-12-06T08:20:22,790 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:22,793 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#B#compaction#361 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:22,794 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/9b76e4492e344ddaa97d1a2295ce37f7 is 50, key is test_row_0/B:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:22,804 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206d2e2db0c195746308c1cc50b47346427_501ff14dc75101de2a84e5507808e766 store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:22,806 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206d2e2db0c195746308c1cc50b47346427_501ff14dc75101de2a84e5507808e766, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:22,806 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d2e2db0c195746308c1cc50b47346427_501ff14dc75101de2a84e5507808e766 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:22,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742252_1428 (size=12731) 2024-12-06T08:20:22,814 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/9b76e4492e344ddaa97d1a2295ce37f7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9b76e4492e344ddaa97d1a2295ce37f7 2024-12-06T08:20:22,820 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/B of 501ff14dc75101de2a84e5507808e766 into 9b76e4492e344ddaa97d1a2295ce37f7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
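The selection messages above ("Exploring compaction algorithm has selected 3 files ... with 1 in ratio") come from the exploring compaction policy, which accepts a candidate set only when every file is no larger than the configured compaction ratio times the combined size of the other files in the set. The helper below is a simplified, hypothetical sketch of that ratio test, not the HBase source; the class and method names are invented, and 1.2 is used as the commonly quoted default for hbase.hstore.compaction.ratio.

public class CompactionRatioSketch {
  // Every file must be at most `ratio` times the combined size of the others.
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three files of roughly equal size (about 12 K each, as in the B-store
    // selection above) comfortably satisfy a 1.2 ratio, so the whole set is chosen.
    long[] sizes = {12_300, 11_900, 11_900};
    System.out.println(filesInRatio(sizes, 1.2)); // true
  }
}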
2024-12-06T08:20:22,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:22,820 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/B, priority=13, startTime=1733473222779; duration=0sec 2024-12-06T08:20:22,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:22,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:B 2024-12-06T08:20:22,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:22,821 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:22,821 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/C is initiating minor compaction (all files) 2024-12-06T08:20:22,821 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/C in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:22,822 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a82ae67d79254d5983260f86ea048aaa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a0380c7837d5419d9398d3d5b4f3c370, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/60f1709c7f24404b90610d4e919c69a4] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=36.1 K 2024-12-06T08:20:22,822 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a82ae67d79254d5983260f86ea048aaa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733473217983 2024-12-06T08:20:22,822 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a0380c7837d5419d9398d3d5b4f3c370, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733473219142 2024-12-06T08:20:22,823 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 60f1709c7f24404b90610d4e919c69a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473221297 2024-12-06T08:20:22,832 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
501ff14dc75101de2a84e5507808e766#C#compaction#362 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:22,832 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/8b173c5ed2c44f9380efb1a5edbb3365 is 50, key is test_row_0/C:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:22,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742254_1430 (size=12731) 2024-12-06T08:20:22,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742253_1429 (size=4469) 2024-12-06T08:20:22,848 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/8b173c5ed2c44f9380efb1a5edbb3365 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/8b173c5ed2c44f9380efb1a5edbb3365 2024-12-06T08:20:22,854 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/C of 501ff14dc75101de2a84e5507808e766 into 8b173c5ed2c44f9380efb1a5edbb3365(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
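The repeated WARN/DEBUG pairs in this section show writes to region 501ff14dc75101de2a84e5507808e766 being rejected with RegionTooBusyException while the region's memstore is over its 512 K blocking limit and flushes are still catching up. The HBase client retries such calls internally, but an application can also add its own pause-and-retry. The sketch below is a minimal illustration, not part of the test: the class name, retry count, pause, and cell value are assumptions, the row/family/qualifier come from the log, and it assumes the exception reaches the caller unwrapped (in practice the client's retry machinery may wrap it).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry a few times with a growing pause when the region reports it is
      // over its memstore blocking limit, instead of failing immediately.
      // Assumption: RegionTooBusyException propagates directly to this caller.
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt == 5) {
            throw e;
          }
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}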
2024-12-06T08:20:22,854 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:22,854 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/C, priority=13, startTime=1733473222779; duration=0sec 2024-12-06T08:20:22,854 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:22,854 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:C 2024-12-06T08:20:23,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:23,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T08:20:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:23,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:23,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:23,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:23,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:23,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:23,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e8a3a4b28a6b4448ad07cf36b6c6fb1f_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473223079/Put/seqid=0 2024-12-06T08:20:23,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742255_1431 (size=14994) 2024-12-06T08:20:23,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473283109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473283114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473283114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473283216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473283222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473283222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,242 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#A#compaction#360 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:23,243 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/26c97af1e4f341638de2f9953b50393b is 175, key is test_row_0/A:col10/1733473221919/Put/seqid=0 2024-12-06T08:20:23,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742256_1432 (size=31685) 2024-12-06T08:20:23,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473283421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473283425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473283426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,494 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:23,497 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e8a3a4b28a6b4448ad07cf36b6c6fb1f_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e8a3a4b28a6b4448ad07cf36b6c6fb1f_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:23,498 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/539f061e3124476e89a17a4ffd6c6e16, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:23,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/539f061e3124476e89a17a4ffd6c6e16 is 175, key is test_row_0/A:col10/1733473223079/Put/seqid=0 2024-12-06T08:20:23,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742257_1433 (size=39949) 2024-12-06T08:20:23,653 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/26c97af1e4f341638de2f9953b50393b as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/26c97af1e4f341638de2f9953b50393b 2024-12-06T08:20:23,657 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 501ff14dc75101de2a84e5507808e766/A of 501ff14dc75101de2a84e5507808e766 into 26c97af1e4f341638de2f9953b50393b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:23,657 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:23,657 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/A, priority=13, startTime=1733473222779; duration=0sec 2024-12-06T08:20:23,657 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:23,657 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:A 2024-12-06T08:20:23,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473283729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473283731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:23,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473283733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-06T08:20:23,834 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-06T08:20:23,835 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-06T08:20:23,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T08:20:23,837 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:23,838 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:23,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:23,903 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/539f061e3124476e89a17a4ffd6c6e16 2024-12-06T08:20:23,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/72e598cc3cd7422491a17c960b5fa271 is 50, key is test_row_0/B:col10/1733473223079/Put/seqid=0 2024-12-06T08:20:23,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742258_1434 
(size=12301) 2024-12-06T08:20:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T08:20:23,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:23,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T08:20:23,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:23,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:23,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:23,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:23,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:23,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:24,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T08:20:24,141 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:24,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T08:20:24,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:24,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:24,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:24,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:24,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:24,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:24,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:24,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473284237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:24,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:24,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473284238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:24,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:24,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473284241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:24,294 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:24,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T08:20:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:24,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:24,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:24,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/72e598cc3cd7422491a17c960b5fa271 2024-12-06T08:20:24,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/166c5ba87ce74626956ba54d0cd8e981 is 50, key is test_row_0/C:col10/1733473223079/Put/seqid=0 2024-12-06T08:20:24,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742259_1435 (size=12301) 2024-12-06T08:20:24,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/166c5ba87ce74626956ba54d0cd8e981 2024-12-06T08:20:24,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/539f061e3124476e89a17a4ffd6c6e16 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16 2024-12-06T08:20:24,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16, entries=200, sequenceid=277, filesize=39.0 K 2024-12-06T08:20:24,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/72e598cc3cd7422491a17c960b5fa271 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/72e598cc3cd7422491a17c960b5fa271 2024-12-06T08:20:24,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/72e598cc3cd7422491a17c960b5fa271, entries=150, sequenceid=277, filesize=12.0 K 
2024-12-06T08:20:24,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/166c5ba87ce74626956ba54d0cd8e981 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/166c5ba87ce74626956ba54d0cd8e981 2024-12-06T08:20:24,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/166c5ba87ce74626956ba54d0cd8e981, entries=150, sequenceid=277, filesize=12.0 K 2024-12-06T08:20:24,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 501ff14dc75101de2a84e5507808e766 in 1271ms, sequenceid=277, compaction requested=false 2024-12-06T08:20:24,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T08:20:24,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:24,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-06T08:20:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:24,447 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-06T08:20:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:24,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:24,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:24,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:24,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ed5d44f13fcf4ae1a702ea9c6d615bf1_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473223113/Put/seqid=0 2024-12-06T08:20:24,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742260_1436 (size=12454) 2024-12-06T08:20:24,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:24,863 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ed5d44f13fcf4ae1a702ea9c6d615bf1_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ed5d44f13fcf4ae1a702ea9c6d615bf1_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:24,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/be9519ce107e4178b39e6c2baa3e6aca, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:24,865 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/be9519ce107e4178b39e6c2baa3e6aca is 175, key is test_row_0/A:col10/1733473223113/Put/seqid=0 2024-12-06T08:20:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742261_1437 (size=31255) 2024-12-06T08:20:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T08:20:25,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:25,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:25,269 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/be9519ce107e4178b39e6c2baa3e6aca 2024-12-06T08:20:25,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/8e95aac0143045f6b26338dae39d2971 is 50, key is test_row_0/B:col10/1733473223113/Put/seqid=0 2024-12-06T08:20:25,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473285270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473285271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473285274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742262_1438 (size=12301) 2024-12-06T08:20:25,284 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/8e95aac0143045f6b26338dae39d2971 2024-12-06T08:20:25,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/b010667ebed74d7f9292d63afbe89936 is 50, key is test_row_0/C:col10/1733473223113/Put/seqid=0 2024-12-06T08:20:25,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742263_1439 (size=12301) 2024-12-06T08:20:25,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473285378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473285378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473285378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43314 deadline: 1733473285538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,541 DEBUG [Thread-1615 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:25,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43278 deadline: 1733473285553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,556 DEBUG [Thread-1619 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:25,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473285585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473285586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473285586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,701 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/b010667ebed74d7f9292d63afbe89936 2024-12-06T08:20:25,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/be9519ce107e4178b39e6c2baa3e6aca as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca 2024-12-06T08:20:25,711 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca, entries=150, sequenceid=295, filesize=30.5 K 2024-12-06T08:20:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/8e95aac0143045f6b26338dae39d2971 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/8e95aac0143045f6b26338dae39d2971 2024-12-06T08:20:25,715 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/8e95aac0143045f6b26338dae39d2971, entries=150, sequenceid=295, filesize=12.0 K 2024-12-06T08:20:25,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/b010667ebed74d7f9292d63afbe89936 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/b010667ebed74d7f9292d63afbe89936 2024-12-06T08:20:25,719 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/b010667ebed74d7f9292d63afbe89936, entries=150, sequenceid=295, filesize=12.0 K 2024-12-06T08:20:25,719 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 501ff14dc75101de2a84e5507808e766 in 1272ms, sequenceid=295, compaction requested=true 2024-12-06T08:20:25,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:25,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:25,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-06T08:20:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-06T08:20:25,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-06T08:20:25,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8830 sec 2024-12-06T08:20:25,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8870 sec 2024-12-06T08:20:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:25,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-06T08:20:25,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:25,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:25,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:25,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:25,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:25,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:25,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120638479aefd02e40ffa8325903bc85e367_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:25,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742264_1440 (size=14994) 2024-12-06T08:20:25,913 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:25,917 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120638479aefd02e40ffa8325903bc85e367_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120638479aefd02e40ffa8325903bc85e367_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:25,919 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/82d81a35f3dd4ecb87b564012e25b9c0, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:25,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/82d81a35f3dd4ecb87b564012e25b9c0 is 175, key is test_row_0/A:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:25,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742265_1441 (size=39949) 2024-12-06T08:20:25,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473285920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473285921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:25,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473285928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:25,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-06T08:20:25,941 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-06T08:20:25,942 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-06T08:20:25,943 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:25,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:25,943 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:25,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:26,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473286028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473286028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473286033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:26,095 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:26,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473286230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473286231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473286235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:26,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:26,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,324 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/82d81a35f3dd4ecb87b564012e25b9c0 2024-12-06T08:20:26,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/31b33aca5cd94c0ab4e167d96064831b is 50, key is test_row_0/B:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:26,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742266_1442 (size=12301) 2024-12-06T08:20:26,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:26,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:26,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473286534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473286535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:26,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473286540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:26,553 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:26,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/31b33aca5cd94c0ab4e167d96064831b 2024-12-06T08:20:26,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/1f864781e996488caa093c99f27e9e2c is 50, key is test_row_0/C:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742267_1443 (size=12301) 2024-12-06T08:20:26,859 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:26,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:26,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:26,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:26,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:27,012 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:27,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:27,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:27,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:27,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:27,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:27,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:27,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:27,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:27,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:27,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:27,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1733473287040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:27,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43320 deadline: 1733473287043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:27,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:27,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43310 deadline: 1733473287049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:27,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/1f864781e996488caa093c99f27e9e2c 2024-12-06T08:20:27,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/82d81a35f3dd4ecb87b564012e25b9c0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0 2024-12-06T08:20:27,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0, entries=200, sequenceid=315, filesize=39.0 K 2024-12-06T08:20:27,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/31b33aca5cd94c0ab4e167d96064831b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/31b33aca5cd94c0ab4e167d96064831b 2024-12-06T08:20:27,164 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:27,165 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:27,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:27,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. as already flushing 2024-12-06T08:20:27,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:27,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:27,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:27,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
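[Editor's note] The warnings above show clients hitting org.apache.hadoop.hbase.RegionTooBusyException once the region's memstore exceeds its (deliberately tiny, 512 K) limit while a flush is in flight. The HBase client normally retries such calls internally; the sketch below only makes the usual back-off-and-retry pattern explicit. It is an illustrative sketch, not code from this test: the table and cell values are taken from the log, but the retry bound and sleep are arbitrary.

// Illustrative sketch (assumptions noted above), HBase 2.x client API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);   // may fail with RegionTooBusyException while the memstore is over its limit
          break;
        } catch (RegionTooBusyException busy) {
          if (++attempts > 5) throw busy;   // arbitrary bound for the example
          Thread.sleep(100L * attempts);    // simple linear backoff before retrying
        }
      }
    }
  }
}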
2024-12-06T08:20:27,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/31b33aca5cd94c0ab4e167d96064831b, entries=150, sequenceid=315, filesize=12.0 K 2024-12-06T08:20:27,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/1f864781e996488caa093c99f27e9e2c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/1f864781e996488caa093c99f27e9e2c 2024-12-06T08:20:27,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/1f864781e996488caa093c99f27e9e2c, entries=150, sequenceid=315, filesize=12.0 K 2024-12-06T08:20:27,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 501ff14dc75101de2a84e5507808e766 in 1281ms, sequenceid=315, compaction requested=true 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:27,175 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 501ff14dc75101de2a84e5507808e766:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:27,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:27,175 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:27,176 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:27,176 DEBUG 
[RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/B is initiating minor compaction (all files) 2024-12-06T08:20:27,176 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/B in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:27,177 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9b76e4492e344ddaa97d1a2295ce37f7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/72e598cc3cd7422491a17c960b5fa271, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/8e95aac0143045f6b26338dae39d2971, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/31b33aca5cd94c0ab4e167d96064831b] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=48.5 K 2024-12-06T08:20:27,177 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142838 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:27,177 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/A is initiating minor compaction (all files) 2024-12-06T08:20:27,177 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/A in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:27,177 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b76e4492e344ddaa97d1a2295ce37f7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473221297 2024-12-06T08:20:27,177 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/26c97af1e4f341638de2f9953b50393b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=139.5 K 2024-12-06T08:20:27,177 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:27,177 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/26c97af1e4f341638de2f9953b50393b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0] 2024-12-06T08:20:27,178 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 72e598cc3cd7422491a17c960b5fa271, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1733473221949 2024-12-06T08:20:27,178 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26c97af1e4f341638de2f9953b50393b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473221297 2024-12-06T08:20:27,178 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 539f061e3124476e89a17a4ffd6c6e16, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1733473221949 2024-12-06T08:20:27,178 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e95aac0143045f6b26338dae39d2971, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733473223104 2024-12-06T08:20:27,178 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting be9519ce107e4178b39e6c2baa3e6aca, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733473223104 2024-12-06T08:20:27,179 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 31b33aca5cd94c0ab4e167d96064831b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733473225262 2024-12-06T08:20:27,179 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82d81a35f3dd4ecb87b564012e25b9c0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733473225262 2024-12-06T08:20:27,192 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:27,194 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#B#compaction#373 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:27,195 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/63aa72e914c64828b9e582439050b7bd is 50, key is test_row_0/B:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:27,200 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120661d2c02e689040cfa2f2dec484144b98_501ff14dc75101de2a84e5507808e766 store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:27,203 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120661d2c02e689040cfa2f2dec484144b98_501ff14dc75101de2a84e5507808e766, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:27,203 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120661d2c02e689040cfa2f2dec484144b98_501ff14dc75101de2a84e5507808e766 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:27,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742268_1444 (size=13017) 2024-12-06T08:20:27,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742269_1445 (size=4469) 2024-12-06T08:20:27,234 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#A#compaction#372 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:27,234 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/f2c4473b53d14462bf3b1aaabc8ddfd1 is 175, key is test_row_0/A:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742270_1446 (size=31971) 2024-12-06T08:20:27,245 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/f2c4473b53d14462bf3b1aaabc8ddfd1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/f2c4473b53d14462bf3b1aaabc8ddfd1 2024-12-06T08:20:27,250 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/A of 501ff14dc75101de2a84e5507808e766 into f2c4473b53d14462bf3b1aaabc8ddfd1(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:27,250 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:27,250 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/A, priority=12, startTime=1733473227175; duration=0sec 2024-12-06T08:20:27,250 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:27,250 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:A 2024-12-06T08:20:27,250 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:27,251 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:27,251 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 501ff14dc75101de2a84e5507808e766/C is initiating minor compaction (all files) 2024-12-06T08:20:27,251 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 501ff14dc75101de2a84e5507808e766/C in TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:27,251 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/8b173c5ed2c44f9380efb1a5edbb3365, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/166c5ba87ce74626956ba54d0cd8e981, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/b010667ebed74d7f9292d63afbe89936, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/1f864781e996488caa093c99f27e9e2c] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp, totalSize=48.5 K 2024-12-06T08:20:27,252 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b173c5ed2c44f9380efb1a5edbb3365, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473221297 2024-12-06T08:20:27,252 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 166c5ba87ce74626956ba54d0cd8e981, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1733473221949 2024-12-06T08:20:27,253 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b010667ebed74d7f9292d63afbe89936, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733473223104 2024-12-06T08:20:27,253 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f864781e996488caa093c99f27e9e2c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733473225262 2024-12-06T08:20:27,270 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 501ff14dc75101de2a84e5507808e766#C#compaction#374 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:27,271 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/eaac2c4760674789ada0a87c3238e0c1 is 50, key is test_row_0/C:col10/1733473225269/Put/seqid=0 2024-12-06T08:20:27,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742271_1447 (size=13017) 2024-12-06T08:20:27,293 DEBUG [Thread-1634 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:65195 2024-12-06T08:20:27,293 DEBUG [Thread-1634 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:27,293 DEBUG [Thread-1626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:65195 2024-12-06T08:20:27,293 DEBUG [Thread-1626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:27,295 DEBUG [Thread-1628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:65195 2024-12-06T08:20:27,295 DEBUG [Thread-1628 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:27,298 DEBUG [Thread-1630 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2205f666 to 127.0.0.1:65195 2024-12-06T08:20:27,298 DEBUG [Thread-1630 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:27,298 DEBUG [Thread-1632 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:65195 2024-12-06T08:20:27,299 DEBUG [Thread-1632 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:27,317 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:27,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-06T08:20:27,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
2024-12-06T08:20:27,318 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-06T08:20:27,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:27,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206565afc1873fa48b7abfb2ba61243c336_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473225920/Put/seqid=0 2024-12-06T08:20:27,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742272_1448 (size=12454) 2024-12-06T08:20:27,622 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/63aa72e914c64828b9e582439050b7bd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/63aa72e914c64828b9e582439050b7bd 2024-12-06T08:20:27,625 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/B of 501ff14dc75101de2a84e5507808e766 into 63aa72e914c64828b9e582439050b7bd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
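[Editor's note] The compactions logged above (stores A, B and C of region 501ff14dc75101de2a84e5507808e766) were queued automatically by MemStoreFlusher after the flush; ExploringCompactionPolicy picked the four eligible HFiles per store and rewrote each set into a single file. The sketch below shows how the same work could be requested explicitly through the Admin API; it is illustrative only and not part of the test, and the column-family argument simply mirrors family "A" from the log.

// Illustrative sketch: explicitly requesting compaction of the test table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table, Bytes.toBytes("A"));  // request a compaction of one column family
      admin.majorCompact(table);                 // or request a full rewrite of every store
    }
  }
}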
2024-12-06T08:20:27,625 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:27,625 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/B, priority=12, startTime=1733473227175; duration=0sec 2024-12-06T08:20:27,625 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:27,625 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:B 2024-12-06T08:20:27,685 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/eaac2c4760674789ada0a87c3238e0c1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/eaac2c4760674789ada0a87c3238e0c1 2024-12-06T08:20:27,688 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 501ff14dc75101de2a84e5507808e766/C of 501ff14dc75101de2a84e5507808e766 into eaac2c4760674789ada0a87c3238e0c1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:27,688 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:27,688 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766., storeName=501ff14dc75101de2a84e5507808e766/C, priority=12, startTime=1733473227175; duration=0sec 2024-12-06T08:20:27,688 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:27,688 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 501ff14dc75101de2a84e5507808e766:C 2024-12-06T08:20:27,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:27,733 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206565afc1873fa48b7abfb2ba61243c336_501ff14dc75101de2a84e5507808e766 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206565afc1873fa48b7abfb2ba61243c336_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:27,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/6d3bc0cf10df402b858efbbe72e183d7, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:27,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/6d3bc0cf10df402b858efbbe72e183d7 is 175, key is test_row_0/A:col10/1733473225920/Put/seqid=0 2024-12-06T08:20:27,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742273_1449 (size=31255) 2024-12-06T08:20:28,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:28,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
as already flushing 2024-12-06T08:20:28,051 DEBUG [Thread-1621 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:65195 2024-12-06T08:20:28,051 DEBUG [Thread-1621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:28,057 DEBUG [Thread-1623 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:65195 2024-12-06T08:20:28,057 DEBUG [Thread-1623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:28,062 DEBUG [Thread-1617 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3637e4c6 to 127.0.0.1:65195 2024-12-06T08:20:28,062 DEBUG [Thread-1617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:28,138 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/6d3bc0cf10df402b858efbbe72e183d7 2024-12-06T08:20:28,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e87f9878c53c4d3693aa29b0986d1903 is 50, key is test_row_0/B:col10/1733473225920/Put/seqid=0 2024-12-06T08:20:28,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742274_1450 (size=12301) 2024-12-06T08:20:28,548 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e87f9878c53c4d3693aa29b0986d1903 2024-12-06T08:20:28,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/ca1d20acc5734137a39634bd1ce8a17c is 50, key is test_row_0/C:col10/1733473225920/Put/seqid=0 2024-12-06T08:20:28,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742275_1451 (size=12301) 2024-12-06T08:20:28,957 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/ca1d20acc5734137a39634bd1ce8a17c 2024-12-06T08:20:28,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/6d3bc0cf10df402b858efbbe72e183d7 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/6d3bc0cf10df402b858efbbe72e183d7 2024-12-06T08:20:28,964 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/6d3bc0cf10df402b858efbbe72e183d7, entries=150, sequenceid=332, filesize=30.5 K 2024-12-06T08:20:28,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/e87f9878c53c4d3693aa29b0986d1903 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e87f9878c53c4d3693aa29b0986d1903 2024-12-06T08:20:28,967 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e87f9878c53c4d3693aa29b0986d1903, entries=150, sequenceid=332, filesize=12.0 K 2024-12-06T08:20:28,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/ca1d20acc5734137a39634bd1ce8a17c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ca1d20acc5734137a39634bd1ce8a17c 2024-12-06T08:20:28,971 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ca1d20acc5734137a39634bd1ce8a17c, entries=150, sequenceid=332, filesize=12.0 K 2024-12-06T08:20:28,972 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=20.13 KB/20610 for 501ff14dc75101de2a84e5507808e766 in 1653ms, sequenceid=332, compaction requested=false 2024-12-06T08:20:28,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:28,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
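[Editor's note] pid=119 above is the per-region FlushRegionProcedure spawned by the table-level FlushTableProcedure (pid=118); once the region server reports success, the master finishes both procedures and the client call that requested the flush returns (the "Operation: FLUSH ... procId: 118 completed" line just below). A minimal sketch of the kind of admin call that drives this sequence follows; it is illustrative only and waits on the table-level flush procedure rather than asserting anything about this test's harness.

// Illustrative sketch: requesting a table flush through the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers a flush of every region of the table; the call completes once the
      // master's flush procedure (as logged above for pid=118/119) is done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}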
2024-12-06T08:20:28,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-06T08:20:28,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-06T08:20:28,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-06T08:20:28,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0290 sec 2024-12-06T08:20:28,974 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.0320 sec 2024-12-06T08:20:29,738 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:20:30,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-06T08:20:30,048 INFO [Thread-1625 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-06T08:20:33,650 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/24da69407e4a48ed8450defb28d12029, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1df55170e37340bb86a5489dd8433062, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd869bc3e4ac46aca36d117a76f1180c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/2256a521dc074fa3a5e97d4a66b5005b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/26c97af1e4f341638de2f9953b50393b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0] to archive 2024-12-06T08:20:33,651 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T08:20:33,652 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd70e21482364b1c80542454e2464f09 2024-12-06T08:20:33,653 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/116d61d9b5644d498f7f369cc618fce7 2024-12-06T08:20:33,654 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/14e1b615378d4b698bea7c98ba2c09cb 2024-12-06T08:20:33,655 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/24da69407e4a48ed8450defb28d12029 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/24da69407e4a48ed8450defb28d12029 2024-12-06T08:20:33,656 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/0a69f860ada44d05857774bb452d2510 2024-12-06T08:20:33,657 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/8555304b4aaa4475840b307a4f8b9c02 2024-12-06T08:20:33,658 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/83434e26683444a29203d704473e1aaf 2024-12-06T08:20:33,658 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1df55170e37340bb86a5489dd8433062 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1df55170e37340bb86a5489dd8433062 2024-12-06T08:20:33,659 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/1f6fe4c32d9341fc9500923987a83f03 2024-12-06T08:20:33,660 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/4a87ec42663b4db2803b951e9e77a33c 2024-12-06T08:20:33,661 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/fd8ca95a52674a3580b3a2a90dfdd52b 2024-12-06T08:20:33,662 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd869bc3e4ac46aca36d117a76f1180c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd869bc3e4ac46aca36d117a76f1180c 2024-12-06T08:20:33,663 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/e8563f0e2b7444e981392d3719d4c353 2024-12-06T08:20:33,664 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/b96b79efe5774bfa8af80e8a680ced13 2024-12-06T08:20:33,665 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/2256a521dc074fa3a5e97d4a66b5005b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/2256a521dc074fa3a5e97d4a66b5005b 2024-12-06T08:20:33,666 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/ca35596a747b474c947246279ec2b976 2024-12-06T08:20:33,667 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/a416b63285b64730b1a6871273287931 2024-12-06T08:20:33,668 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/26c97af1e4f341638de2f9953b50393b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/26c97af1e4f341638de2f9953b50393b 2024-12-06T08:20:33,669 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/539f061e3124476e89a17a4ffd6c6e16 2024-12-06T08:20:33,670 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/be9519ce107e4178b39e6c2baa3e6aca 2024-12-06T08:20:33,671 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/82d81a35f3dd4ecb87b564012e25b9c0 2024-12-06T08:20:33,676 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6ed94678c6514da7be0fd62a1783e7a3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a7803184b2ad4d2aa96a42081d8d1529, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/d79d4e5e7eae4457b995738a691731b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/100a8d5e493c4ac3ac37d1bb910c89d5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/1995c2c8054440d986e1caeb67910069, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e7ee40ac08f74a41ae4a8ab0358123af, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/5e4fab9226144a12b288eb9e350540fa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e30943e090434fad9ac93bb63cb7d0a6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6fd0bb94ba8f408d87438e07e699c1ae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/2332708f64b047a0966cd30ee92af466, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20a8fc21190a42649f8fe166aa6e439d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/807858ef47a149afa62cc59308f4a7ca, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9a95cc5935284897ade0234967fd58db, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/76afbcf09f774272b1b5331be053bd17, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6f40ab9e7009477e8a0de834268d3cc5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a135dc44a52244cdbf4895055127dcf9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9b76e4492e344ddaa97d1a2295ce37f7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/37d9ad4225e04df5bd600d6b54ee4273, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/72e598cc3cd7422491a17c960b5fa271, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/8e95aac0143045f6b26338dae39d2971, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/31b33aca5cd94c0ab4e167d96064831b] to archive 2024-12-06T08:20:33,676 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
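The RS_COMPACTED_FILES_DISCHARGER entries above show store files made obsolete by a finished compaction being moved, one by one, from the region's column-family directory under data/ to the parallel path under archive/ rather than being deleted outright. Below is a minimal sketch of that move, using only the Hadoop FileSystem API and assuming the directory layout visible in these paths; the real HFileArchiver additionally handles retries, name collisions and the FileableStoreFile wrappers it logs.

    // Sketch only: mirrors the data/ -> archive/data/ move the archiver logs above.
    // The class and helper names are illustrative, not HBase internals.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;

    public class ArchiveMoveSketch {
      /** data/<ns>/<table>/<region>/<cf>/<hfile> -> archive/data/<ns>/... under the same root. */
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();
        String relative = storeFile.toUri().getPath().substring(root.length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
      }

      static void archive(Configuration conf, Path rootDir, Path storeFile) throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path target = toArchivePath(rootDir, storeFile);
        fs.mkdirs(target.getParent());        // make sure archive/<...>/<cf> exists
        if (!fs.rename(storeFile, target)) {  // a move, not a copy: the source path disappears
          throw new IOException("Failed to archive " + storeFile + " to " + target);
        }
      }
    }

Archiving instead of deleting keeps the old files reachable for snapshots and backups; a separate cleaner chore removes them from archive/ once nothing references them any longer.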
2024-12-06T08:20:33,678 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6ed94678c6514da7be0fd62a1783e7a3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6ed94678c6514da7be0fd62a1783e7a3 2024-12-06T08:20:33,679 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a7803184b2ad4d2aa96a42081d8d1529 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a7803184b2ad4d2aa96a42081d8d1529 2024-12-06T08:20:33,680 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/d79d4e5e7eae4457b995738a691731b9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/d79d4e5e7eae4457b995738a691731b9 2024-12-06T08:20:33,681 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/100a8d5e493c4ac3ac37d1bb910c89d5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/100a8d5e493c4ac3ac37d1bb910c89d5 2024-12-06T08:20:33,682 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/1995c2c8054440d986e1caeb67910069 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/1995c2c8054440d986e1caeb67910069 2024-12-06T08:20:33,682 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e7ee40ac08f74a41ae4a8ab0358123af to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e7ee40ac08f74a41ae4a8ab0358123af 2024-12-06T08:20:33,683 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/5e4fab9226144a12b288eb9e350540fa to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/5e4fab9226144a12b288eb9e350540fa 2024-12-06T08:20:33,684 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e30943e090434fad9ac93bb63cb7d0a6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e30943e090434fad9ac93bb63cb7d0a6 2024-12-06T08:20:33,685 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6fd0bb94ba8f408d87438e07e699c1ae to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6fd0bb94ba8f408d87438e07e699c1ae 2024-12-06T08:20:33,686 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/2332708f64b047a0966cd30ee92af466 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/2332708f64b047a0966cd30ee92af466 2024-12-06T08:20:33,686 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20a8fc21190a42649f8fe166aa6e439d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20a8fc21190a42649f8fe166aa6e439d 2024-12-06T08:20:33,687 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/807858ef47a149afa62cc59308f4a7ca to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/807858ef47a149afa62cc59308f4a7ca 2024-12-06T08:20:33,688 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9a95cc5935284897ade0234967fd58db to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9a95cc5935284897ade0234967fd58db 2024-12-06T08:20:33,689 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/76afbcf09f774272b1b5331be053bd17 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/76afbcf09f774272b1b5331be053bd17 2024-12-06T08:20:33,690 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6f40ab9e7009477e8a0de834268d3cc5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/6f40ab9e7009477e8a0de834268d3cc5 2024-12-06T08:20:33,690 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a135dc44a52244cdbf4895055127dcf9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/a135dc44a52244cdbf4895055127dcf9 2024-12-06T08:20:33,691 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9b76e4492e344ddaa97d1a2295ce37f7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/9b76e4492e344ddaa97d1a2295ce37f7 2024-12-06T08:20:33,692 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/37d9ad4225e04df5bd600d6b54ee4273 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/37d9ad4225e04df5bd600d6b54ee4273 2024-12-06T08:20:33,693 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/72e598cc3cd7422491a17c960b5fa271 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/72e598cc3cd7422491a17c960b5fa271 2024-12-06T08:20:33,694 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/8e95aac0143045f6b26338dae39d2971 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/8e95aac0143045f6b26338dae39d2971 2024-12-06T08:20:33,695 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/31b33aca5cd94c0ab4e167d96064831b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/31b33aca5cd94c0ab4e167d96064831b 2024-12-06T08:20:33,697 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/444a4a54da26475d9429f7f02ac888cd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/479fbe739fb7412b844b65ca6dd550c0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/49cf8c20c9274a35baa23644ca0693f0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/01ddcdf81a584ae1a72fb7324ccaa229, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/122d212b18ba4f929ab6391610210947, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/af1cbec945a649839f7daba1920bb376, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/985ff13d26054015b4247fb2166198fb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/bd8bf54e79244ed68a638ecff1b4ccc5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/798c43ffc3c5475590424d45a8fb939a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a48c8d4f20c7449ab046407d59bd6236, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/cdace0d5ae8347998bbf6267fcb3a39f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6def75300c394c5bbf71fe4228357eb3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ed8f65abe8f84e0898d0e1759856a585, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a82ae67d79254d5983260f86ea048aaa, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6956e69b8ea945e1bb1a93062f1b2c29, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a0380c7837d5419d9398d3d5b4f3c370, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/8b173c5ed2c44f9380efb1a5edbb3365, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/60f1709c7f24404b90610d4e919c69a4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/166c5ba87ce74626956ba54d0cd8e981, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/b010667ebed74d7f9292d63afbe89936, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/1f864781e996488caa093c99f27e9e2c] to archive 2024-12-06T08:20:33,698 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
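The same batch pattern now repeats for column family C: HStore first lists every compacted-away file it still tracks ("Moving the files [...] to archive"), then the archiver moves them individually. The discharger only reaches this point once no open scanner still reads the old files. A rough sketch of that bookkeeping follows, using invented names rather than HBase's internal classes.

    // Pattern sketch with hypothetical names (CompactedFile, CompactedFilesDischarger);
    // it illustrates the bookkeeping behind the RS_COMPACTED_FILES_DISCHARGER chore,
    // not the actual HStore/HFileArchiver implementation.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    final class CompactedFile {
      final String path;                                    // store-file path under data/
      final AtomicInteger openReaders = new AtomicInteger();
      CompactedFile(String path) { this.path = path; }
    }

    final class CompactedFilesDischarger {
      private final List<CompactedFile> compactedAway = new ArrayList<>();

      synchronized void markCompactedAway(CompactedFile f) { compactedAway.add(f); }

      /** Periodic chore: collect every compacted file that no scanner holds open. */
      synchronized List<String> discharge() {
        List<String> toArchive = new ArrayList<>();
        compactedAway.removeIf(f -> {
          if (f.openReaders.get() == 0) { toArchive.add(f.path); return true; }
          return false;
        });
        return toArchive;                                   // caller moves these under archive/
      }
    }

Each "Moving the files [...] to archive" batch in the log corresponds to one such collected list for a single store: first family A, then B, then C.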
2024-12-06T08:20:33,699 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/444a4a54da26475d9429f7f02ac888cd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/444a4a54da26475d9429f7f02ac888cd 2024-12-06T08:20:33,700 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/479fbe739fb7412b844b65ca6dd550c0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/479fbe739fb7412b844b65ca6dd550c0 2024-12-06T08:20:33,701 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/49cf8c20c9274a35baa23644ca0693f0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/49cf8c20c9274a35baa23644ca0693f0 2024-12-06T08:20:33,701 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/01ddcdf81a584ae1a72fb7324ccaa229 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/01ddcdf81a584ae1a72fb7324ccaa229 2024-12-06T08:20:33,702 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/122d212b18ba4f929ab6391610210947 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/122d212b18ba4f929ab6391610210947 2024-12-06T08:20:33,703 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/af1cbec945a649839f7daba1920bb376 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/af1cbec945a649839f7daba1920bb376 2024-12-06T08:20:33,704 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/985ff13d26054015b4247fb2166198fb to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/985ff13d26054015b4247fb2166198fb 2024-12-06T08:20:33,705 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/bd8bf54e79244ed68a638ecff1b4ccc5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/bd8bf54e79244ed68a638ecff1b4ccc5 2024-12-06T08:20:33,706 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/798c43ffc3c5475590424d45a8fb939a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/798c43ffc3c5475590424d45a8fb939a 2024-12-06T08:20:33,707 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a48c8d4f20c7449ab046407d59bd6236 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a48c8d4f20c7449ab046407d59bd6236 2024-12-06T08:20:33,708 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/cdace0d5ae8347998bbf6267fcb3a39f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/cdace0d5ae8347998bbf6267fcb3a39f 2024-12-06T08:20:33,709 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6def75300c394c5bbf71fe4228357eb3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6def75300c394c5bbf71fe4228357eb3 2024-12-06T08:20:33,709 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ed8f65abe8f84e0898d0e1759856a585 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ed8f65abe8f84e0898d0e1759856a585 2024-12-06T08:20:33,710 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a82ae67d79254d5983260f86ea048aaa to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a82ae67d79254d5983260f86ea048aaa 2024-12-06T08:20:33,711 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6956e69b8ea945e1bb1a93062f1b2c29 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/6956e69b8ea945e1bb1a93062f1b2c29 2024-12-06T08:20:33,712 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a0380c7837d5419d9398d3d5b4f3c370 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a0380c7837d5419d9398d3d5b4f3c370 2024-12-06T08:20:33,713 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/8b173c5ed2c44f9380efb1a5edbb3365 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/8b173c5ed2c44f9380efb1a5edbb3365 2024-12-06T08:20:33,714 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/60f1709c7f24404b90610d4e919c69a4 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/60f1709c7f24404b90610d4e919c69a4 2024-12-06T08:20:33,714 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/166c5ba87ce74626956ba54d0cd8e981 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/166c5ba87ce74626956ba54d0cd8e981 2024-12-06T08:20:33,715 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/b010667ebed74d7f9292d63afbe89936 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/b010667ebed74d7f9292d63afbe89936 2024-12-06T08:20:33,716 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/1f864781e996488caa093c99f27e9e2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/1f864781e996488caa093c99f27e9e2c 2024-12-06T08:20:35,545 DEBUG [Thread-1615 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df61dc9 to 127.0.0.1:65195 2024-12-06T08:20:35,546 DEBUG [Thread-1615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:35,650 DEBUG [Thread-1619 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f422b4 to 127.0.0.1:65195 2024-12-06T08:20:35,650 DEBUG [Thread-1619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2526 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7577 rows 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2554 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7662 rows 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2541 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7623 rows 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2543 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7625 rows 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2548 2024-12-06T08:20:35,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7642 rows 2024-12-06T08:20:35,650 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:20:35,650 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f5b2180 to 127.0.0.1:65195 2024-12-06T08:20:35,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:20:35,653 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T08:20:35,654 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T08:20:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T08:20:35,657 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473235657"}]},"ts":"1733473235657"} 2024-12-06T08:20:35,658 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T08:20:35,661 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T08:20:35,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:20:35,663 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, UNASSIGN}] 2024-12-06T08:20:35,664 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, UNASSIGN 2024-12-06T08:20:35,665 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:35,666 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:20:35,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:20:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T08:20:35,817 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:35,818 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing 501ff14dc75101de2a84e5507808e766, disabling compactions & flushes 2024-12-06T08:20:35,818 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. after waiting 0 ms 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 
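At this point the region has stopped accepting updates but still holds unflushed edits, so the close handler performs one last flush of all three column families (the "Flushing ... 3/3 column families, dataSize=33.54 KB" entries that follow) before the region can be reported CLOSED and the disable procedure can complete. This unassign is the server-side half of the table teardown the test drives through the client; in client code that teardown corresponds roughly to the standard Admin calls sketched below (connection and configuration details are illustrative).

    // Client-side counterpart of DisableTableProcedure pid=120 and, further on,
    // DeleteTableProcedure pid=124. Standard HBase client API; configuration is illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestAcidGuaranteesTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);  // master closes every region, flushing pending edits
          }
          admin.deleteTable(table);     // master then archives the region directories
        }
      }
    }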
2024-12-06T08:20:35,818 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing 501ff14dc75101de2a84e5507808e766 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=A 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=B 2024-12-06T08:20:35,818 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:35,819 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 501ff14dc75101de2a84e5507808e766, store=C 2024-12-06T08:20:35,819 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:35,823 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068ed3f7ed22d9457d9ae48a53b198e956_501ff14dc75101de2a84e5507808e766 is 50, key is test_row_0/A:col10/1733473235648/Put/seqid=0 2024-12-06T08:20:35,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742276_1452 (size=12454) 2024-12-06T08:20:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T08:20:36,227 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:36,231 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068ed3f7ed22d9457d9ae48a53b198e956_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068ed3f7ed22d9457d9ae48a53b198e956_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:36,231 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/cd31afc053044cc2ba0e42e96dfb61ca, store: [table=TestAcidGuarantees family=A region=501ff14dc75101de2a84e5507808e766] 2024-12-06T08:20:36,232 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/cd31afc053044cc2ba0e42e96dfb61ca is 175, key is test_row_0/A:col10/1733473235648/Put/seqid=0 2024-12-06T08:20:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742277_1453 (size=31255) 2024-12-06T08:20:36,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T08:20:36,636 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/cd31afc053044cc2ba0e42e96dfb61ca 2024-12-06T08:20:36,641 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/20092842d5af47ea97af6535e9766ef7 is 50, key is test_row_0/B:col10/1733473235648/Put/seqid=0 2024-12-06T08:20:36,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742278_1454 (size=12301) 2024-12-06T08:20:36,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T08:20:37,045 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/20092842d5af47ea97af6535e9766ef7 2024-12-06T08:20:37,050 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a3f2ffa4ffc74e2eb7d3ab658442d93f is 50, key is test_row_0/C:col10/1733473235648/Put/seqid=0 2024-12-06T08:20:37,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742279_1455 (size=12301) 2024-12-06T08:20:37,454 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=342 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a3f2ffa4ffc74e2eb7d3ab658442d93f 2024-12-06T08:20:37,458 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/A/cd31afc053044cc2ba0e42e96dfb61ca as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd31afc053044cc2ba0e42e96dfb61ca 2024-12-06T08:20:37,460 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd31afc053044cc2ba0e42e96dfb61ca, entries=150, sequenceid=342, filesize=30.5 K 2024-12-06T08:20:37,461 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/B/20092842d5af47ea97af6535e9766ef7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20092842d5af47ea97af6535e9766ef7 2024-12-06T08:20:37,464 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20092842d5af47ea97af6535e9766ef7, entries=150, sequenceid=342, filesize=12.0 K 2024-12-06T08:20:37,464 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/.tmp/C/a3f2ffa4ffc74e2eb7d3ab658442d93f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a3f2ffa4ffc74e2eb7d3ab658442d93f 2024-12-06T08:20:37,467 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a3f2ffa4ffc74e2eb7d3ab658442d93f, entries=150, sequenceid=342, filesize=12.0 K 2024-12-06T08:20:37,468 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 501ff14dc75101de2a84e5507808e766 in 1650ms, sequenceid=342, compaction requested=true 2024-12-06T08:20:37,472 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=4 2024-12-06T08:20:37,472 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766. 2024-12-06T08:20:37,472 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for 501ff14dc75101de2a84e5507808e766: 2024-12-06T08:20:37,474 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed 501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,474 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=501ff14dc75101de2a84e5507808e766, regionState=CLOSED 2024-12-06T08:20:37,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-06T08:20:37,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure 501ff14dc75101de2a84e5507808e766, server=b6b797fc3981,38041,1733473111442 in 1.8090 sec 2024-12-06T08:20:37,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-12-06T08:20:37,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=501ff14dc75101de2a84e5507808e766, UNASSIGN in 1.8130 sec 2024-12-06T08:20:37,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-06T08:20:37,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8160 sec 2024-12-06T08:20:37,479 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473237479"}]},"ts":"1733473237479"} 2024-12-06T08:20:37,479 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T08:20:37,481 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T08:20:37,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8280 sec 2024-12-06T08:20:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-06T08:20:37,760 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-06T08:20:37,760 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T08:20:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,762 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-06T08:20:37,762 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,763 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,765 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/recovered.edits] 2024-12-06T08:20:37,767 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/6d3bc0cf10df402b858efbbe72e183d7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/6d3bc0cf10df402b858efbbe72e183d7 2024-12-06T08:20:37,767 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd31afc053044cc2ba0e42e96dfb61ca to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/cd31afc053044cc2ba0e42e96dfb61ca 2024-12-06T08:20:37,768 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/f2c4473b53d14462bf3b1aaabc8ddfd1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/A/f2c4473b53d14462bf3b1aaabc8ddfd1 2024-12-06T08:20:37,770 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20092842d5af47ea97af6535e9766ef7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/20092842d5af47ea97af6535e9766ef7 2024-12-06T08:20:37,771 DEBUG [HFileArchiver-4 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/63aa72e914c64828b9e582439050b7bd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/63aa72e914c64828b9e582439050b7bd 2024-12-06T08:20:37,771 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e87f9878c53c4d3693aa29b0986d1903 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/B/e87f9878c53c4d3693aa29b0986d1903 2024-12-06T08:20:37,773 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a3f2ffa4ffc74e2eb7d3ab658442d93f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/a3f2ffa4ffc74e2eb7d3ab658442d93f 2024-12-06T08:20:37,773 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ca1d20acc5734137a39634bd1ce8a17c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/ca1d20acc5734137a39634bd1ce8a17c 2024-12-06T08:20:37,774 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/eaac2c4760674789ada0a87c3238e0c1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/C/eaac2c4760674789ada0a87c3238e0c1 2024-12-06T08:20:37,776 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/recovered.edits/345.seqid to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766/recovered.edits/345.seqid 2024-12-06T08:20:37,777 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,777 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T08:20:37,777 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T08:20:37,778 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-06T08:20:37,780 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060cf9f71e279e43e884e6e7fa1434c26e_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412060cf9f71e279e43e884e6e7fa1434c26e_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,781 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063299f0a7798e4ca8bdaa0f60cdb37bfc_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063299f0a7798e4ca8bdaa0f60cdb37bfc_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,782 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120638479aefd02e40ffa8325903bc85e367_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120638479aefd02e40ffa8325903bc85e367_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,783 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063defb7d075154e72a3111a6635e5bf46_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063defb7d075154e72a3111a6635e5bf46_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,783 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120640d2df47fbff4eb0a54e2f1e05d58199_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120640d2df47fbff4eb0a54e2f1e05d58199_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,784 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206565afc1873fa48b7abfb2ba61243c336_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206565afc1873fa48b7abfb2ba61243c336_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,785 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120664fa9eaef0de42fc986751b2c7fd6d46_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120664fa9eaef0de42fc986751b2c7fd6d46_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,786 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206703ca792488b4f5b991f00ba358e276e_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206703ca792488b4f5b991f00ba358e276e_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,786 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067652b274eb66445fad2a5df3b922107a_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067652b274eb66445fad2a5df3b922107a_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,787 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068ed3f7ed22d9457d9ae48a53b198e956_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068ed3f7ed22d9457d9ae48a53b198e956_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,788 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206933ba1f99b34482eb5a36b03858966d4_501ff14dc75101de2a84e5507808e766 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206933ba1f99b34482eb5a36b03858966d4_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,789 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206960b90cad02646bd8b45ffd9154682f9_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206960b90cad02646bd8b45ffd9154682f9_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,789 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a4e91b404efc4bbc8ee07d6557b22733_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206a4e91b404efc4bbc8ee07d6557b22733_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,790 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b3a638e193904b09b4f6955eefeeefcb_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b3a638e193904b09b4f6955eefeeefcb_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,791 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c09581af6ca148de82f7892478f4d558_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c09581af6ca148de82f7892478f4d558_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,792 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c0acb49bd054407897e7db8da8fbda99_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c0acb49bd054407897e7db8da8fbda99_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,792 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e8a3a4b28a6b4448ad07cf36b6c6fb1f_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e8a3a4b28a6b4448ad07cf36b6c6fb1f_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,793 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ed5d44f13fcf4ae1a702ea9c6d615bf1_501ff14dc75101de2a84e5507808e766 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206ed5d44f13fcf4ae1a702ea9c6d615bf1_501ff14dc75101de2a84e5507808e766 2024-12-06T08:20:37,794 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T08:20:37,795 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,797 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T08:20:37,798 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T08:20:37,799 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,799 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T08:20:37,799 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733473237799"}]},"ts":"9223372036854775807"} 2024-12-06T08:20:37,800 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T08:20:37,800 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 501ff14dc75101de2a84e5507808e766, NAME => 'TestAcidGuarantees,,1733473204231.501ff14dc75101de2a84e5507808e766.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T08:20:37,801 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-06T08:20:37,801 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733473237801"}]},"ts":"9223372036854775807"} 2024-12-06T08:20:37,802 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T08:20:37,804 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 44 msec 2024-12-06T08:20:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-06T08:20:37,863 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-06T08:20:37,872 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237 (was 237), OpenFileDescriptor=445 (was 449), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=394 (was 415), ProcessCount=9 (was 11), AvailableMemoryMB=8484 (was 7455) - AvailableMemoryMB LEAK? - 2024-12-06T08:20:37,880 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=394, ProcessCount=9, AvailableMemoryMB=8484 2024-12-06T08:20:37,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-06T08:20:37,881 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:20:37,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T08:20:37,883 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:20:37,883 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:37,883 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-12-06T08:20:37,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-06T08:20:37,884 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:20:37,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742280_1456 (size=963) 2024-12-06T08:20:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-06T08:20:38,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-06T08:20:38,290 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:20:38,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742281_1457 (size=53) 2024-12-06T08:20:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-06T08:20:38,696 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:20:38,696 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8e01d2c9a20bbffd8abe9402655a3d81, disabling compactions & flushes 2024-12-06T08:20:38,696 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:38,696 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:38,696 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. after waiting 0 ms 2024-12-06T08:20:38,696 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:38,696 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:38,696 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:38,697 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:20:38,697 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733473238697"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473238697"}]},"ts":"1733473238697"} 2024-12-06T08:20:38,698 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:20:38,699 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:20:38,699 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473238699"}]},"ts":"1733473238699"} 2024-12-06T08:20:38,700 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T08:20:38,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, ASSIGN}] 2024-12-06T08:20:38,704 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, ASSIGN 2024-12-06T08:20:38,704 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:20:38,855 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=8e01d2c9a20bbffd8abe9402655a3d81, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:38,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure 8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:20:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-06T08:20:39,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:39,010 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:39,010 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:20:39,010 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,010 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:20:39,011 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,011 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,012 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,014 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:39,014 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e01d2c9a20bbffd8abe9402655a3d81 columnFamilyName A 2024-12-06T08:20:39,014 DEBUG [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:39,015 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.HStore(327): Store=8e01d2c9a20bbffd8abe9402655a3d81/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:39,015 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,016 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:39,016 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e01d2c9a20bbffd8abe9402655a3d81 columnFamilyName B 2024-12-06T08:20:39,017 DEBUG [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:39,017 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.HStore(327): Store=8e01d2c9a20bbffd8abe9402655a3d81/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:39,017 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,018 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:20:39,018 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e01d2c9a20bbffd8abe9402655a3d81 columnFamilyName C 2024-12-06T08:20:39,018 DEBUG [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:20:39,018 INFO [StoreOpener-8e01d2c9a20bbffd8abe9402655a3d81-1 {}] regionserver.HStore(327): Store=8e01d2c9a20bbffd8abe9402655a3d81/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:20:39,019 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:39,019 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,020 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,021 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:20:39,022 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:39,023 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:20:39,023 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened 8e01d2c9a20bbffd8abe9402655a3d81; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65957111, jitterRate=-0.01716245710849762}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:20:39,024 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:39,024 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., pid=127, masterSystemTime=1733473239007 2024-12-06T08:20:39,026 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:39,026 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:39,026 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=8e01d2c9a20bbffd8abe9402655a3d81, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:39,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-06T08:20:39,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure 8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 in 171 msec 2024-12-06T08:20:39,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-06T08:20:39,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, ASSIGN in 325 msec 2024-12-06T08:20:39,029 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:20:39,030 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473239029"}]},"ts":"1733473239029"} 2024-12-06T08:20:39,030 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T08:20:39,032 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:20:39,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-06T08:20:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-06T08:20:39,987 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-12-06T08:20:39,988 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cb726fe to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59bd764a 2024-12-06T08:20:39,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238db126, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:39,993 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:39,994 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:39,995 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:20:39,995 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58778, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:20:39,997 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x301741f1 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22a6e9f 2024-12-06T08:20:39,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c60eb7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,000 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63cefe40 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32c12a30 2024-12-06T08:20:40,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b10416, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,003 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65df2359 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ef40578 2024-12-06T08:20:40,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f142b04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,006 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-12-06T08:20:40,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,009 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-12-06T08:20:40,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,013 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-12-06T08:20:40,023 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,023 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-12-06T08:20:40,026 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,026 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2070263a to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7861b162 2024-12-06T08:20:40,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf40102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,030 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6050584c to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@154f0f85 2024-12-06T08:20:40,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496fe03f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-12-06T08:20:40,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:20:40,043 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-06T08:20:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-06T08:20:40,044 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:40,044 DEBUG [hconnection-0x58a5ec77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,044 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:40,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:40,045 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,051 DEBUG [hconnection-0x6c2f3258-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,051 DEBUG [hconnection-0x6f6059ab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,051 DEBUG [hconnection-0x4746177f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,051 DEBUG [hconnection-0x3b17e29f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,051 DEBUG [hconnection-0x41391b5d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,052 DEBUG [hconnection-0x34f815bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,052 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,052 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40448, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,052 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,052 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,053 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,053 DEBUG [hconnection-0x63a2d58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,053 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,053 DEBUG [hconnection-0x4c5aa4ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,054 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-06T08:20:40,054 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,056 DEBUG [hconnection-0x1eb24088-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:20:40,057 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:20:40,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:40,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:40,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:40,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:40,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:40,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:40,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:40,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/4a947f29a88441b8a148743e3ca1c161 is 50, key is test_row_0/A:col10/1733473240057/Put/seqid=0 2024-12-06T08:20:40,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742282_1458 (size=12001) 2024-12-06T08:20:40,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473300118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473300119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473300120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473300120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473300122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-06T08:20:40,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-06T08:20:40,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:40,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:40,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:40,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473300223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473300223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473300223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473300224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473300224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-06T08:20:40,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-06T08:20:40,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:40,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:40,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:40,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473300426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473300426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473300426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473300427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473300426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/4a947f29a88441b8a148743e3ca1c161 2024-12-06T08:20:40,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-06T08:20:40,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:40,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:40,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:40,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:40,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/fe11b321ee8f4bd09873db16afff56d9 is 50, key is test_row_0/B:col10/1733473240057/Put/seqid=0 2024-12-06T08:20:40,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742283_1459 (size=12001) 2024-12-06T08:20:40,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/fe11b321ee8f4bd09873db16afff56d9 2024-12-06T08:20:40,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/7d89a23ced0f4d448bcfdf72218ee6f9 is 50, key is test_row_0/C:col10/1733473240057/Put/seqid=0 2024-12-06T08:20:40,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742284_1460 (size=12001) 2024-12-06T08:20:40,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/7d89a23ced0f4d448bcfdf72218ee6f9 2024-12-06T08:20:40,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/4a947f29a88441b8a148743e3ca1c161 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/4a947f29a88441b8a148743e3ca1c161 2024-12-06T08:20:40,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/4a947f29a88441b8a148743e3ca1c161, entries=150, sequenceid=14, filesize=11.7 K 2024-12-06T08:20:40,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/fe11b321ee8f4bd09873db16afff56d9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/fe11b321ee8f4bd09873db16afff56d9 2024-12-06T08:20:40,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/fe11b321ee8f4bd09873db16afff56d9, entries=150, sequenceid=14, filesize=11.7 K 2024-12-06T08:20:40,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/7d89a23ced0f4d448bcfdf72218ee6f9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d89a23ced0f4d448bcfdf72218ee6f9 2024-12-06T08:20:40,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d89a23ced0f4d448bcfdf72218ee6f9, entries=150, sequenceid=14, filesize=11.7 K 2024-12-06T08:20:40,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8e01d2c9a20bbffd8abe9402655a3d81 in 540ms, sequenceid=14, compaction requested=false 2024-12-06T08:20:40,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-06T08:20:40,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-06T08:20:40,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:40,654 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:20:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:40,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:40,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a6247bb980294f9dab1250c5acfcb6c4 is 50, key is test_row_0/A:col10/1733473240117/Put/seqid=0 2024-12-06T08:20:40,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742285_1461 (size=12001) 2024-12-06T08:20:40,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:40,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:40,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473300739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473300742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473300743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473300743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473300744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473300845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473300847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473300850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473300851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:40,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473300851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473301049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473301053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473301054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473301054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473301054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,063 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a6247bb980294f9dab1250c5acfcb6c4 2024-12-06T08:20:41,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/0e827d60aac24c69ad41bf0570dd8e51 is 50, key is test_row_0/B:col10/1733473240117/Put/seqid=0 2024-12-06T08:20:41,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742286_1462 (size=12001) 2024-12-06T08:20:41,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-06T08:20:41,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473301352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473301357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473301357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473301358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473301358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,482 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/0e827d60aac24c69ad41bf0570dd8e51 2024-12-06T08:20:41,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/254d1b5e9ea846978e351bb1e1ddf08d is 50, key is test_row_0/C:col10/1733473240117/Put/seqid=0 2024-12-06T08:20:41,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742287_1463 (size=12001) 2024-12-06T08:20:41,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473301857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473301860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473301861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473301862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:41,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473301864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:41,893 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/254d1b5e9ea846978e351bb1e1ddf08d 2024-12-06T08:20:41,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a6247bb980294f9dab1250c5acfcb6c4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a6247bb980294f9dab1250c5acfcb6c4 2024-12-06T08:20:41,901 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a6247bb980294f9dab1250c5acfcb6c4, entries=150, sequenceid=37, filesize=11.7 K 2024-12-06T08:20:41,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/0e827d60aac24c69ad41bf0570dd8e51 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0e827d60aac24c69ad41bf0570dd8e51 2024-12-06T08:20:41,905 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0e827d60aac24c69ad41bf0570dd8e51, entries=150, sequenceid=37, filesize=11.7 K 2024-12-06T08:20:41,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/254d1b5e9ea846978e351bb1e1ddf08d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/254d1b5e9ea846978e351bb1e1ddf08d 2024-12-06T08:20:41,909 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/254d1b5e9ea846978e351bb1e1ddf08d, entries=150, sequenceid=37, filesize=11.7 K 2024-12-06T08:20:41,910 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1256ms, sequenceid=37, compaction requested=false 2024-12-06T08:20:41,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:41,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:41,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-06T08:20:41,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-06T08:20:41,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-06T08:20:41,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8660 sec 2024-12-06T08:20:41,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.8700 sec 2024-12-06T08:20:42,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-06T08:20:42,148 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-06T08:20:42,149 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-06T08:20:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T08:20:42,151 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:42,151 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:42,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:42,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T08:20:42,303 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:42,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:42,304 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:42,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:42,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/dd923086402d42ae96b247fc77000aaf is 50, key is test_row_0/A:col10/1733473240743/Put/seqid=0 2024-12-06T08:20:42,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742288_1464 (size=12001) 2024-12-06T08:20:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T08:20:42,726 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/dd923086402d42ae96b247fc77000aaf 2024-12-06T08:20:42,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/55bf3273536b4800900e9d6e2407737e is 50, key is test_row_0/B:col10/1733473240743/Put/seqid=0 2024-12-06T08:20:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742289_1465 (size=12001) 2024-12-06T08:20:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T08:20:42,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:42,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:42,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473302891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473302895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473302896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473302897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:42,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473302897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:42,997 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T08:20:43,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473302998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473303005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473303005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473303006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473303007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,137 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/55bf3273536b4800900e9d6e2407737e 2024-12-06T08:20:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5c0a522866554b8486bedf1659d743f1 is 50, key is test_row_0/C:col10/1733473240743/Put/seqid=0 2024-12-06T08:20:43,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742290_1466 (size=12001) 2024-12-06T08:20:43,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473303206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473303212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473303213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473303213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473303214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T08:20:43,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473303514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473303520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473303520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473303521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473303521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:43,553 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5c0a522866554b8486bedf1659d743f1 2024-12-06T08:20:43,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/dd923086402d42ae96b247fc77000aaf as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/dd923086402d42ae96b247fc77000aaf 2024-12-06T08:20:43,561 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/dd923086402d42ae96b247fc77000aaf, entries=150, sequenceid=50, filesize=11.7 K 2024-12-06T08:20:43,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/55bf3273536b4800900e9d6e2407737e as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/55bf3273536b4800900e9d6e2407737e
2024-12-06T08:20:43,564 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/55bf3273536b4800900e9d6e2407737e, entries=150, sequenceid=50, filesize=11.7 K
2024-12-06T08:20:43,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5c0a522866554b8486bedf1659d743f1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5c0a522866554b8486bedf1659d743f1
2024-12-06T08:20:43,573 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5c0a522866554b8486bedf1659d743f1, entries=150, sequenceid=50, filesize=11.7 K
2024-12-06T08:20:43,574 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1270ms, sequenceid=50, compaction requested=true
2024-12-06T08:20:43,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81:
2024-12-06T08:20:43,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.
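[Editor's note on the preceding log: while this flush is draining the memstore to the A/B/C store files, concurrent client mutations are rejected with org.apache.hadoop.hbase.RegionTooBusyException because the region is over its memstore blocking size (the "Over memstore limit=512.0 K" in the traces above; this limit is normally derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier). The sketch below is an illustrative client-side reaction to that condition, not code from this test: the table, row, family, and qualifier are taken from the log, but the written value, the retry bounds, and the assumption that the busy condition surfaces to the caller as (or caused by) a RegionTooBusyException rather than being absorbed by the client's own retry policy are hypothetical.]

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family, and qualifier mirror the log; the value is a placeholder.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int maxAttempts = 5;
            long backoffMs = 100L;
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    table.put(put);   // may fail while the region's memstore is over its blocking limit
                    break;            // write accepted
                } catch (IOException ioe) {
                    boolean regionBusy = ioe instanceof RegionTooBusyException
                            || ioe.getCause() instanceof RegionTooBusyException;
                    if (!regionBusy || attempt == maxAttempts) {
                        throw ioe;    // not a transient "too busy" condition, or out of attempts
                    }
                    Thread.sleep(backoffMs);  // give the in-flight flush time to drain the memstore
                    backoffMs *= 2;           // simple exponential backoff before retrying
                }
            }
        }
    }
}

[If the retry storm seen above is routine for a workload, the server-side alternative is usually to raise the blocking threshold (flush size and/or block multiplier) or spread the writes across more regions, rather than retrying harder on the client.]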
2024-12-06T08:20:43,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-06T08:20:43,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-06T08:20:43,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-06T08:20:43,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4250 sec 2024-12-06T08:20:43,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.4290 sec 2024-12-06T08:20:44,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:44,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T08:20:44,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:44,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:44,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:44,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:44,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:44,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:44,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/15cdebe030bd450eb6acbf5153061be1 is 50, key is test_row_0/A:col10/1733473242896/Put/seqid=0 2024-12-06T08:20:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742291_1467 (size=14341) 2024-12-06T08:20:44,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473304035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473304036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473304036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473304041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473304041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473304142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473304142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473304142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473304142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473304142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-06T08:20:44,254 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-06T08:20:44,256 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:44,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-06T08:20:44,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T08:20:44,257 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:44,257 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:44,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:44,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473304348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473304348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473304349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473304349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473304350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T08:20:44,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T08:20:44,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:44,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:44,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/15cdebe030bd450eb6acbf5153061be1 2024-12-06T08:20:44,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/dc49cc07b2364d2e818fe434b7fd9984 is 50, key is test_row_0/B:col10/1733473242896/Put/seqid=0 2024-12-06T08:20:44,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742292_1468 (size=12001) 2024-12-06T08:20:44,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T08:20:44,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T08:20:44,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:44,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473304651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473304652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473304652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473304652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:44,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473304653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T08:20:44,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:44,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:44,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/dc49cc07b2364d2e818fe434b7fd9984 2024-12-06T08:20:44,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/b90c210757844153b172c081a196b8b9 is 50, key is test_row_0/C:col10/1733473242896/Put/seqid=0 2024-12-06T08:20:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T08:20:44,868 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:44,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T08:20:44,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:44,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:44,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:44,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742293_1469 (size=12001) 2024-12-06T08:20:44,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/b90c210757844153b172c081a196b8b9 2024-12-06T08:20:44,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/15cdebe030bd450eb6acbf5153061be1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/15cdebe030bd450eb6acbf5153061be1 2024-12-06T08:20:44,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/15cdebe030bd450eb6acbf5153061be1, entries=200, sequenceid=74, filesize=14.0 K 2024-12-06T08:20:44,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/dc49cc07b2364d2e818fe434b7fd9984 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/dc49cc07b2364d2e818fe434b7fd9984 2024-12-06T08:20:44,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/dc49cc07b2364d2e818fe434b7fd9984, entries=150, sequenceid=74, filesize=11.7 K 2024-12-06T08:20:44,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/b90c210757844153b172c081a196b8b9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/b90c210757844153b172c081a196b8b9 2024-12-06T08:20:44,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/b90c210757844153b172c081a196b8b9, entries=150, sequenceid=74, filesize=11.7 K 2024-12-06T08:20:44,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 8e01d2c9a20bbffd8abe9402655a3d81 in 863ms, sequenceid=74, compaction requested=true 2024-12-06T08:20:44,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:44,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:44,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:44,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:44,889 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:44,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:44,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:44,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T08:20:44,889 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:44,890 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:44,890 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:44,890 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:44,890 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/fe11b321ee8f4bd09873db16afff56d9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0e827d60aac24c69ad41bf0570dd8e51, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/55bf3273536b4800900e9d6e2407737e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/dc49cc07b2364d2e818fe434b7fd9984] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=46.9 K 2024-12-06T08:20:44,890 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:44,890 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:44,890 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:44,891 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/4a947f29a88441b8a148743e3ca1c161, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a6247bb980294f9dab1250c5acfcb6c4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/dd923086402d42ae96b247fc77000aaf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/15cdebe030bd450eb6acbf5153061be1] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=49.2 K 2024-12-06T08:20:44,891 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a947f29a88441b8a148743e3ca1c161, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733473240056 2024-12-06T08:20:44,891 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fe11b321ee8f4bd09873db16afff56d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733473240056 2024-12-06T08:20:44,891 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6247bb980294f9dab1250c5acfcb6c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, 
earliestPutTs=1733473240117 2024-12-06T08:20:44,891 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e827d60aac24c69ad41bf0570dd8e51, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733473240117 2024-12-06T08:20:44,892 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd923086402d42ae96b247fc77000aaf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733473240741 2024-12-06T08:20:44,892 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 55bf3273536b4800900e9d6e2407737e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733473240741 2024-12-06T08:20:44,892 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15cdebe030bd450eb6acbf5153061be1, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473242894 2024-12-06T08:20:44,892 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dc49cc07b2364d2e818fe434b7fd9984, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473242896 2024-12-06T08:20:44,900 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#393 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:44,900 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/2296bad74f8c4fc69753a2e269b56800 is 50, key is test_row_0/A:col10/1733473242896/Put/seqid=0 2024-12-06T08:20:44,902 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#394 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:44,903 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/12f501cefe8e4a3a94b73605e03cde00 is 50, key is test_row_0/B:col10/1733473242896/Put/seqid=0 2024-12-06T08:20:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742294_1470 (size=12139) 2024-12-06T08:20:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742295_1471 (size=12139) 2024-12-06T08:20:44,929 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/12f501cefe8e4a3a94b73605e03cde00 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/12f501cefe8e4a3a94b73605e03cde00 2024-12-06T08:20:44,933 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into 12f501cefe8e4a3a94b73605e03cde00(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:44,933 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:44,933 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=12, startTime=1733473244889; duration=0sec 2024-12-06T08:20:44,933 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:44,933 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:44,933 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:44,935 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:44,935 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:44,935 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:44,935 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d89a23ced0f4d448bcfdf72218ee6f9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/254d1b5e9ea846978e351bb1e1ddf08d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5c0a522866554b8486bedf1659d743f1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/b90c210757844153b172c081a196b8b9] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=46.9 K 2024-12-06T08:20:44,935 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d89a23ced0f4d448bcfdf72218ee6f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733473240056 2024-12-06T08:20:44,936 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 254d1b5e9ea846978e351bb1e1ddf08d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733473240117 2024-12-06T08:20:44,936 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c0a522866554b8486bedf1659d743f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733473240741 2024-12-06T08:20:44,937 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b90c210757844153b172c081a196b8b9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473242896 2024-12-06T08:20:44,947 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#395 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:44,948 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/78528288789848c48dec22c1c3efc51a is 50, key is test_row_0/C:col10/1733473242896/Put/seqid=0 2024-12-06T08:20:44,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742296_1472 (size=12139) 2024-12-06T08:20:44,960 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/78528288789848c48dec22c1c3efc51a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/78528288789848c48dec22c1c3efc51a 2024-12-06T08:20:44,966 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into 78528288789848c48dec22c1c3efc51a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:44,966 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:44,966 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=12, startTime=1733473244889; duration=0sec 2024-12-06T08:20:44,966 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:44,966 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:45,021 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-06T08:20:45,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:45,022 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:20:45,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:45,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:45,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:45,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:45,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:45,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:45,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/819787ddff6a47de931955217609d15d is 50, key is test_row_0/A:col10/1733473244040/Put/seqid=0 2024-12-06T08:20:45,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742297_1473 (size=12001) 2024-12-06T08:20:45,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:45,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:45,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473305187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473305187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473305187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473305188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473305188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473305293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473305293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473305294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473305295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473305295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,325 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/2296bad74f8c4fc69753a2e269b56800 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/2296bad74f8c4fc69753a2e269b56800 2024-12-06T08:20:45,329 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into 2296bad74f8c4fc69753a2e269b56800(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:45,329 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:45,329 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=12, startTime=1733473244889; duration=0sec 2024-12-06T08:20:45,329 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:45,329 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:45,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T08:20:45,434 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/819787ddff6a47de931955217609d15d 2024-12-06T08:20:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/9072a89636374abc8d8726c0ed96b9c5 is 50, key is test_row_0/B:col10/1733473244040/Put/seqid=0 2024-12-06T08:20:45,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742298_1474 (size=12001) 2024-12-06T08:20:45,452 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/9072a89636374abc8d8726c0ed96b9c5 2024-12-06T08:20:45,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/7d3dc5a033ae4a02b64acfea1d1774b7 is 50, key is test_row_0/C:col10/1733473244040/Put/seqid=0 2024-12-06T08:20:45,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742299_1475 (size=12001) 2024-12-06T08:20:45,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473305499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473305500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473305500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473305500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473305500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473305805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473305806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473305806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473305806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473305807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:45,864 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/7d3dc5a033ae4a02b64acfea1d1774b7 2024-12-06T08:20:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/819787ddff6a47de931955217609d15d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/819787ddff6a47de931955217609d15d 2024-12-06T08:20:45,872 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/819787ddff6a47de931955217609d15d, entries=150, sequenceid=89, filesize=11.7 K 2024-12-06T08:20:45,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/9072a89636374abc8d8726c0ed96b9c5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/9072a89636374abc8d8726c0ed96b9c5 2024-12-06T08:20:45,876 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/9072a89636374abc8d8726c0ed96b9c5, entries=150, sequenceid=89, filesize=11.7 K 2024-12-06T08:20:45,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/7d3dc5a033ae4a02b64acfea1d1774b7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d3dc5a033ae4a02b64acfea1d1774b7 2024-12-06T08:20:45,879 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d3dc5a033ae4a02b64acfea1d1774b7, entries=150, sequenceid=89, filesize=11.7 K 2024-12-06T08:20:45,880 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 8e01d2c9a20bbffd8abe9402655a3d81 in 858ms, sequenceid=89, compaction requested=false 2024-12-06T08:20:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-06T08:20:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-06T08:20:45,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-06T08:20:45,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6230 sec 2024-12-06T08:20:45,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.6270 sec 2024-12-06T08:20:46,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:46,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:20:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:46,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:46,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:46,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/f26f361f4cea4103b2d4aec01456b059 is 50, key is test_row_0/A:col10/1733473246313/Put/seqid=0 2024-12-06T08:20:46,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473306319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473306320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742300_1476 (size=14341) 2024-12-06T08:20:46,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473306323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473306324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473306324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/f26f361f4cea4103b2d4aec01456b059 2024-12-06T08:20:46,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/21a4e4dd669c4a6c9f6001a16ccb582c is 50, key is test_row_0/B:col10/1733473246313/Put/seqid=0 2024-12-06T08:20:46,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742301_1477 (size=12001) 2024-12-06T08:20:46,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/21a4e4dd669c4a6c9f6001a16ccb582c 2024-12-06T08:20:46,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5707d70e73ea4a239c8b6358cef749f3 is 50, key is test_row_0/C:col10/1733473246313/Put/seqid=0 2024-12-06T08:20:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-06T08:20:46,361 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-06T08:20:46,363 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-06T08:20:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T08:20:46,364 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:46,364 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:46,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:46,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742302_1478 (size=12001) 2024-12-06T08:20:46,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473306425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473306426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473306430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473306430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473306431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T08:20:46,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T08:20:46,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:46,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:46,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:46,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:46,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:46,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473306632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473306633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473306634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473306634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473306635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-06T08:20:46,669 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T08:20:46,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:46,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:46,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:46,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:46,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:46,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5707d70e73ea4a239c8b6358cef749f3 2024-12-06T08:20:46,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/f26f361f4cea4103b2d4aec01456b059 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f26f361f4cea4103b2d4aec01456b059 2024-12-06T08:20:46,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f26f361f4cea4103b2d4aec01456b059, entries=200, sequenceid=115, filesize=14.0 K 2024-12-06T08:20:46,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/21a4e4dd669c4a6c9f6001a16ccb582c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/21a4e4dd669c4a6c9f6001a16ccb582c 2024-12-06T08:20:46,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/21a4e4dd669c4a6c9f6001a16ccb582c, entries=150, sequenceid=115, filesize=11.7 K 2024-12-06T08:20:46,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5707d70e73ea4a239c8b6358cef749f3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5707d70e73ea4a239c8b6358cef749f3 2024-12-06T08:20:46,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5707d70e73ea4a239c8b6358cef749f3, entries=150, sequenceid=115, filesize=11.7 K 2024-12-06T08:20:46,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8e01d2c9a20bbffd8abe9402655a3d81 in 476ms, sequenceid=115, compaction requested=true 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:46,791 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:46,791 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:46,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:46,792 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:46,792 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:46,792 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:46,792 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/2296bad74f8c4fc69753a2e269b56800, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/819787ddff6a47de931955217609d15d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f26f361f4cea4103b2d4aec01456b059] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=37.6 K 2024-12-06T08:20:46,792 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:46,792 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2296bad74f8c4fc69753a2e269b56800, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473242896 2024-12-06T08:20:46,792 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:46,792 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:46,792 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/12f501cefe8e4a3a94b73605e03cde00, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/9072a89636374abc8d8726c0ed96b9c5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/21a4e4dd669c4a6c9f6001a16ccb582c] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=35.3 K 2024-12-06T08:20:46,793 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 819787ddff6a47de931955217609d15d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733473244032 2024-12-06T08:20:46,793 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 12f501cefe8e4a3a94b73605e03cde00, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473242896 2024-12-06T08:20:46,793 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting f26f361f4cea4103b2d4aec01456b059, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733473245186 2024-12-06T08:20:46,793 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9072a89636374abc8d8726c0ed96b9c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733473244032 2024-12-06T08:20:46,794 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 21a4e4dd669c4a6c9f6001a16ccb582c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733473245186 2024-12-06T08:20:46,806 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#402 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:46,807 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/741bca471b3447089d231a0642bfaf0b is 50, key is test_row_0/B:col10/1733473246313/Put/seqid=0 2024-12-06T08:20:46,809 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:46,810 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/6b571e76a2794facbaf2b49e96d9c497 is 50, key is test_row_0/A:col10/1733473246313/Put/seqid=0 2024-12-06T08:20:46,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742303_1479 (size=12241) 2024-12-06T08:20:46,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742304_1480 (size=12241) 2024-12-06T08:20:46,820 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/741bca471b3447089d231a0642bfaf0b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/741bca471b3447089d231a0642bfaf0b 2024-12-06T08:20:46,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-06T08:20:46,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:46,823 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:46,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:46,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:46,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:46,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,827 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into 741bca471b3447089d231a0642bfaf0b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:46,827 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:46,827 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=13, startTime=1733473246791; duration=0sec 2024-12-06T08:20:46,827 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:46,827 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:46,828 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:46,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a5f1c08d247149438ca9e6ffaea3278a is 50, key is test_row_0/A:col10/1733473246322/Put/seqid=0 2024-12-06T08:20:46,828 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:46,828 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:46,829 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:46,829 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/78528288789848c48dec22c1c3efc51a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d3dc5a033ae4a02b64acfea1d1774b7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5707d70e73ea4a239c8b6358cef749f3] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=35.3 K 2024-12-06T08:20:46,829 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/6b571e76a2794facbaf2b49e96d9c497 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6b571e76a2794facbaf2b49e96d9c497 2024-12-06T08:20:46,829 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 78528288789848c48dec22c1c3efc51a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733473242896 2024-12-06T08:20:46,830 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d3dc5a033ae4a02b64acfea1d1774b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733473244032 2024-12-06T08:20:46,830 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5707d70e73ea4a239c8b6358cef749f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733473245186 2024-12-06T08:20:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742305_1481 (size=12001) 2024-12-06T08:20:46,835 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a5f1c08d247149438ca9e6ffaea3278a 2024-12-06T08:20:46,835 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into 6b571e76a2794facbaf2b49e96d9c497(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:46,835 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:46,835 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=13, startTime=1733473246791; duration=0sec 2024-12-06T08:20:46,835 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:46,835 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:46,839 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#405 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:46,839 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/4c7f99b6669741c2ae66e40c3a1463b2 is 50, key is test_row_0/C:col10/1733473246313/Put/seqid=0 2024-12-06T08:20:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742306_1482 (size=12241) 2024-12-06T08:20:46,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/b4e2498c0cd54f51abbacd5b34978edb is 50, key is test_row_0/B:col10/1733473246322/Put/seqid=0 2024-12-06T08:20:46,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742307_1483 (size=12001) 2024-12-06T08:20:46,847 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/b4e2498c0cd54f51abbacd5b34978edb 2024-12-06T08:20:46,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/f333d8035f744e4191d08daedc979b29 is 50, key is test_row_0/C:col10/1733473246322/Put/seqid=0 2024-12-06T08:20:46,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742308_1484 (size=12001) 2024-12-06T08:20:46,864 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=17.89 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/f333d8035f744e4191d08daedc979b29 2024-12-06T08:20:46,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a5f1c08d247149438ca9e6ffaea3278a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a5f1c08d247149438ca9e6ffaea3278a 2024-12-06T08:20:46,871 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a5f1c08d247149438ca9e6ffaea3278a, entries=150, sequenceid=126, filesize=11.7 K 2024-12-06T08:20:46,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/b4e2498c0cd54f51abbacd5b34978edb as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/b4e2498c0cd54f51abbacd5b34978edb 2024-12-06T08:20:46,876 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/b4e2498c0cd54f51abbacd5b34978edb, entries=150, sequenceid=126, filesize=11.7 K 2024-12-06T08:20:46,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/f333d8035f744e4191d08daedc979b29 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f333d8035f744e4191d08daedc979b29 2024-12-06T08:20:46,881 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f333d8035f744e4191d08daedc979b29, entries=150, sequenceid=126, filesize=11.7 K 2024-12-06T08:20:46,882 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 8e01d2c9a20bbffd8abe9402655a3d81 in 59ms, sequenceid=126, compaction requested=false 2024-12-06T08:20:46,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] 
regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:46,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:46,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-06T08:20:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-06T08:20:46,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-06T08:20:46,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 518 msec 2024-12-06T08:20:46,886 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 522 msec 2024-12-06T08:20:46,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:46,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/b4e3f4dbca0f415883ef9afee011194a is 50, key is test_row_0/A:col10/1733473246948/Put/seqid=0 2024-12-06T08:20:46,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742309_1485 (size=16931) 2024-12-06T08:20:46,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/b4e3f4dbca0f415883ef9afee011194a 2024-12-06T08:20:46,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 
2024-12-06T08:20:46,966 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-06T08:20:46,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/332e98a4961a4acaa34f8ba94b41e6a8 is 50, key is test_row_0/B:col10/1733473246948/Put/seqid=0 2024-12-06T08:20:46,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-06T08:20:46,970 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T08:20:46,971 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:46,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:46,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742310_1486 (size=12151) 2024-12-06T08:20:46,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473306976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473306977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473306980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473306990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:46,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:46,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473306990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T08:20:47,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473307091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473307091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473307095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473307097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473307099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,122 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T08:20:47,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:47,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:47,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:47,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:47,247 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/4c7f99b6669741c2ae66e40c3a1463b2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4c7f99b6669741c2ae66e40c3a1463b2 2024-12-06T08:20:47,252 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into 4c7f99b6669741c2ae66e40c3a1463b2(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:47,252 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:47,252 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=13, startTime=1733473246791; duration=0sec 2024-12-06T08:20:47,252 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:47,252 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T08:20:47,275 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T08:20:47,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:47,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:47,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:47,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:47,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473307295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473307295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473307301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473307302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473307302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/332e98a4961a4acaa34f8ba94b41e6a8 2024-12-06T08:20:47,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5876dd32c64c477d843c2ef62ef1ffbd is 50, key is test_row_0/C:col10/1733473246948/Put/seqid=0 2024-12-06T08:20:47,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742311_1487 (size=12151) 2024-12-06T08:20:47,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5876dd32c64c477d843c2ef62ef1ffbd 2024-12-06T08:20:47,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/b4e3f4dbca0f415883ef9afee011194a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/b4e3f4dbca0f415883ef9afee011194a 2024-12-06T08:20:47,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/b4e3f4dbca0f415883ef9afee011194a, entries=250, sequenceid=140, filesize=16.5 K 2024-12-06T08:20:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/332e98a4961a4acaa34f8ba94b41e6a8 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/332e98a4961a4acaa34f8ba94b41e6a8 2024-12-06T08:20:47,408 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/332e98a4961a4acaa34f8ba94b41e6a8, entries=150, sequenceid=140, filesize=11.9 K 2024-12-06T08:20:47,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5876dd32c64c477d843c2ef62ef1ffbd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5876dd32c64c477d843c2ef62ef1ffbd 2024-12-06T08:20:47,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5876dd32c64c477d843c2ef62ef1ffbd, entries=150, sequenceid=140, filesize=11.9 K 2024-12-06T08:20:47,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 8e01d2c9a20bbffd8abe9402655a3d81 in 465ms, sequenceid=140, compaction requested=true 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:47,414 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:47,414 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:47,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:47,415 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41173 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:47,415 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:47,415 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:47,415 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:47,415 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,415 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,415 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/741bca471b3447089d231a0642bfaf0b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/b4e2498c0cd54f51abbacd5b34978edb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/332e98a4961a4acaa34f8ba94b41e6a8] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=35.5 K 2024-12-06T08:20:47,415 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6b571e76a2794facbaf2b49e96d9c497, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a5f1c08d247149438ca9e6ffaea3278a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/b4e3f4dbca0f415883ef9afee011194a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=40.2 K 2024-12-06T08:20:47,416 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b571e76a2794facbaf2b49e96d9c497, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733473245186 2024-12-06T08:20:47,416 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 741bca471b3447089d231a0642bfaf0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733473245186 2024-12-06T08:20:47,416 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5f1c08d247149438ca9e6ffaea3278a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733473246317 
2024-12-06T08:20:47,416 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b4e2498c0cd54f51abbacd5b34978edb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733473246317 2024-12-06T08:20:47,417 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 332e98a4961a4acaa34f8ba94b41e6a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733473246943 2024-12-06T08:20:47,417 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4e3f4dbca0f415883ef9afee011194a, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733473246943 2024-12-06T08:20:47,426 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#411 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:47,426 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/64e565867675445bbc1e1664ab807d4e is 50, key is test_row_0/B:col10/1733473246948/Put/seqid=0 2024-12-06T08:20:47,428 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-06T08:20:47,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,429 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:20:47,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:47,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:47,429 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#412 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:47,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:47,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:47,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:47,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:47,430 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/e47c09e021d248998bfe942db4fb1f32 is 50, key is test_row_0/A:col10/1733473246948/Put/seqid=0 2024-12-06T08:20:47,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d8b144d73bcb45fa9ed7476f9dac166d is 50, key is test_row_0/A:col10/1733473246989/Put/seqid=0 2024-12-06T08:20:47,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742313_1489 (size=12493) 2024-12-06T08:20:47,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742312_1488 (size=12493) 2024-12-06T08:20:47,486 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/64e565867675445bbc1e1664ab807d4e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/64e565867675445bbc1e1664ab807d4e 2024-12-06T08:20:47,491 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into 64e565867675445bbc1e1664ab807d4e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:47,491 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:47,491 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=13, startTime=1733473247414; duration=0sec 2024-12-06T08:20:47,491 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:47,491 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:47,491 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:47,492 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:47,492 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:47,492 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:47,492 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4c7f99b6669741c2ae66e40c3a1463b2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f333d8035f744e4191d08daedc979b29, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5876dd32c64c477d843c2ef62ef1ffbd] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=35.5 K 2024-12-06T08:20:47,493 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c7f99b6669741c2ae66e40c3a1463b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733473245186 2024-12-06T08:20:47,494 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f333d8035f744e4191d08daedc979b29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1733473246317 2024-12-06T08:20:47,496 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5876dd32c64c477d843c2ef62ef1ffbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733473246943 2024-12-06T08:20:47,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 
is added to blk_1073742314_1490 (size=12151) 2024-12-06T08:20:47,504 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:47,505 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/0bfb249e7b4541d6964e602048ecfece is 50, key is test_row_0/C:col10/1733473246948/Put/seqid=0 2024-12-06T08:20:47,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742315_1491 (size=12493) 2024-12-06T08:20:47,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T08:20:47,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:47,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:47,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473307611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473307611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473307612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473307612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473307615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473307717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473307718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473307718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473307719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473307721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,879 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/e47c09e021d248998bfe942db4fb1f32 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/e47c09e021d248998bfe942db4fb1f32 2024-12-06T08:20:47,883 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into e47c09e021d248998bfe942db4fb1f32(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:47,883 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:47,883 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=13, startTime=1733473247414; duration=0sec 2024-12-06T08:20:47,883 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:47,883 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:47,899 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d8b144d73bcb45fa9ed7476f9dac166d 2024-12-06T08:20:47,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/1da4d7d12b1546b49943f605dec6504d is 50, key is test_row_0/B:col10/1733473246989/Put/seqid=0 2024-12-06T08:20:47,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742316_1492 (size=12151) 2024-12-06T08:20:47,914 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/0bfb249e7b4541d6964e602048ecfece as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/0bfb249e7b4541d6964e602048ecfece 2024-12-06T08:20:47,917 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into 0bfb249e7b4541d6964e602048ecfece(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:47,917 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:47,917 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=13, startTime=1733473247414; duration=0sec 2024-12-06T08:20:47,917 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:47,917 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:47,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473307922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473307923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473307923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473307923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:47,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:47,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473307924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T08:20:48,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473308227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473308228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473308229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473308229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473308230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,310 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/1da4d7d12b1546b49943f605dec6504d 2024-12-06T08:20:48,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/e1e275c3fba84a479176c849272b3a1d is 50, key is test_row_0/C:col10/1733473246989/Put/seqid=0 2024-12-06T08:20:48,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742317_1493 (size=12151) 2024-12-06T08:20:48,729 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/e1e275c3fba84a479176c849272b3a1d 2024-12-06T08:20:48,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d8b144d73bcb45fa9ed7476f9dac166d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d8b144d73bcb45fa9ed7476f9dac166d 2024-12-06T08:20:48,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473308734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473308734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473308735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,736 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d8b144d73bcb45fa9ed7476f9dac166d, entries=150, sequenceid=166, filesize=11.9 K 2024-12-06T08:20:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/1da4d7d12b1546b49943f605dec6504d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1da4d7d12b1546b49943f605dec6504d 2024-12-06T08:20:48,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473308736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,740 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1da4d7d12b1546b49943f605dec6504d, entries=150, sequenceid=166, filesize=11.9 K 2024-12-06T08:20:48,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:48,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473308737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:48,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/e1e275c3fba84a479176c849272b3a1d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/e1e275c3fba84a479176c849272b3a1d 2024-12-06T08:20:48,744 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/e1e275c3fba84a479176c849272b3a1d, entries=150, sequenceid=166, filesize=11.9 K 2024-12-06T08:20:48,745 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1317ms, sequenceid=166, compaction requested=false 2024-12-06T08:20:48,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:48,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:48,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-06T08:20:48,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-06T08:20:48,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-06T08:20:48,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7750 sec 2024-12-06T08:20:48,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7790 sec 2024-12-06T08:20:49,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-06T08:20:49,074 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-06T08:20:49,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:49,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-06T08:20:49,077 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:49,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T08:20:49,078 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:49,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:49,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T08:20:49,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:49,230 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:49,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:49,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/3523437a90f647b4bab2de30aece58fc is 50, key is test_row_0/A:col10/1733473247610/Put/seqid=0 2024-12-06T08:20:49,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742318_1494 (size=12151) 2024-12-06T08:20:49,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T08:20:49,639 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/3523437a90f647b4bab2de30aece58fc 2024-12-06T08:20:49,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6130564f7ff6437f972f183148310844 is 50, key is test_row_0/B:col10/1733473247610/Put/seqid=0 2024-12-06T08:20:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742319_1495 (size=12151) 2024-12-06T08:20:49,654 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=180 
(bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6130564f7ff6437f972f183148310844 2024-12-06T08:20:49,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/aa028b9f671e4e19916fdc6bb051c92e is 50, key is test_row_0/C:col10/1733473247610/Put/seqid=0 2024-12-06T08:20:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742320_1496 (size=12151) 2024-12-06T08:20:49,665 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/aa028b9f671e4e19916fdc6bb051c92e 2024-12-06T08:20:49,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/3523437a90f647b4bab2de30aece58fc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3523437a90f647b4bab2de30aece58fc 2024-12-06T08:20:49,674 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3523437a90f647b4bab2de30aece58fc, entries=150, sequenceid=180, filesize=11.9 K 2024-12-06T08:20:49,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6130564f7ff6437f972f183148310844 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6130564f7ff6437f972f183148310844 2024-12-06T08:20:49,678 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6130564f7ff6437f972f183148310844, entries=150, sequenceid=180, filesize=11.9 K 2024-12-06T08:20:49,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/aa028b9f671e4e19916fdc6bb051c92e as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/aa028b9f671e4e19916fdc6bb051c92e 2024-12-06T08:20:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T08:20:49,685 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/aa028b9f671e4e19916fdc6bb051c92e, entries=150, sequenceid=180, filesize=11.9 K 2024-12-06T08:20:49,685 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 8e01d2c9a20bbffd8abe9402655a3d81 in 455ms, sequenceid=180, compaction requested=true 2024-12-06T08:20:49,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:49,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:49,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-06T08:20:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-06T08:20:49,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-06T08:20:49,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 610 msec 2024-12-06T08:20:49,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 614 msec 2024-12-06T08:20:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:49,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:49,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:49,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:49,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:49,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:49,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 
2024-12-06T08:20:49,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:49,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/c88e6414570c41e0bab20dda04b13970 is 50, key is test_row_0/A:col10/1733473249747/Put/seqid=0 2024-12-06T08:20:49,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742321_1497 (size=14541) 2024-12-06T08:20:49,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473309777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473309783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473309784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473309784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473309785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473309886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473309891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473309892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473309892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:49,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:49,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473309893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473310093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473310099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473310099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473310100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473310102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/c88e6414570c41e0bab20dda04b13970 2024-12-06T08:20:50,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6ef82f2685af4f99b37d79a74c26cd0a is 50, key is test_row_0/B:col10/1733473249747/Put/seqid=0 2024-12-06T08:20:50,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-06T08:20:50,181 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-06T08:20:50,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:50,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-06T08:20:50,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T08:20:50,183 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:50,184 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:50,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742322_1498 (size=12151) 
2024-12-06T08:20:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T08:20:50,335 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T08:20:50,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:50,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473310400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473310405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473310405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473310405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473310406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T08:20:50,489 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T08:20:50,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:50,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6ef82f2685af4f99b37d79a74c26cd0a 2024-12-06T08:20:50,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/d4961c265d454f5185312da4e51185f5 is 50, key is test_row_0/C:col10/1733473249747/Put/seqid=0 2024-12-06T08:20:50,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742323_1499 (size=12151) 2024-12-06T08:20:50,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T08:20:50,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
as already flushing 2024-12-06T08:20:50,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T08:20:50,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T08:20:50,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:50,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473310905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473310912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473310913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473310914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:50,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473310915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,947 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:50,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T08:20:50,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:50,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:50,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:50,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:51,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/d4961c265d454f5185312da4e51185f5 2024-12-06T08:20:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/c88e6414570c41e0bab20dda04b13970 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c88e6414570c41e0bab20dda04b13970 2024-12-06T08:20:51,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c88e6414570c41e0bab20dda04b13970, entries=200, sequenceid=191, filesize=14.2 K 2024-12-06T08:20:51,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6ef82f2685af4f99b37d79a74c26cd0a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6ef82f2685af4f99b37d79a74c26cd0a 2024-12-06T08:20:51,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6ef82f2685af4f99b37d79a74c26cd0a, entries=150, 
sequenceid=191, filesize=11.9 K 2024-12-06T08:20:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/d4961c265d454f5185312da4e51185f5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/d4961c265d454f5185312da4e51185f5 2024-12-06T08:20:51,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/d4961c265d454f5185312da4e51185f5, entries=150, sequenceid=191, filesize=11.9 K 2024-12-06T08:20:51,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1265ms, sequenceid=191, compaction requested=true 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:51,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:51,015 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:51,015 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:51,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51336 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:51,016 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:51,017 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:51,017 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:51,017 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:51,017 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:51,017 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/e47c09e021d248998bfe942db4fb1f32, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d8b144d73bcb45fa9ed7476f9dac166d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3523437a90f647b4bab2de30aece58fc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c88e6414570c41e0bab20dda04b13970] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=50.1 K 2024-12-06T08:20:51,017 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/64e565867675445bbc1e1664ab807d4e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1da4d7d12b1546b49943f605dec6504d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6130564f7ff6437f972f183148310844, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6ef82f2685af4f99b37d79a74c26cd0a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=47.8 K 2024-12-06T08:20:51,017 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e47c09e021d248998bfe942db4fb1f32, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733473246943 2024-12-06T08:20:51,017 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 64e565867675445bbc1e1664ab807d4e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733473246943 2024-12-06T08:20:51,017 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8b144d73bcb45fa9ed7476f9dac166d, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733473246974 2024-12-06T08:20:51,017 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da4d7d12b1546b49943f605dec6504d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733473246974 2024-12-06T08:20:51,018 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6130564f7ff6437f972f183148310844, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733473247610 2024-12-06T08:20:51,018 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3523437a90f647b4bab2de30aece58fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733473247610 2024-12-06T08:20:51,018 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ef82f2685af4f99b37d79a74c26cd0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733473249747 2024-12-06T08:20:51,018 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c88e6414570c41e0bab20dda04b13970, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733473249746 2024-12-06T08:20:51,029 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#423 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:51,030 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/5101f1bbae244eea966bdecfc263035d is 50, key is test_row_0/A:col10/1733473249747/Put/seqid=0 2024-12-06T08:20:51,043 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#424 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:51,044 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/614f69c99b504d23a5d05930ce1b7ce1 is 50, key is test_row_0/B:col10/1733473249747/Put/seqid=0 2024-12-06T08:20:51,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742324_1500 (size=12629) 2024-12-06T08:20:51,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742325_1501 (size=12629) 2024-12-06T08:20:51,059 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/5101f1bbae244eea966bdecfc263035d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5101f1bbae244eea966bdecfc263035d 2024-12-06T08:20:51,063 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/614f69c99b504d23a5d05930ce1b7ce1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/614f69c99b504d23a5d05930ce1b7ce1 2024-12-06T08:20:51,065 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into 5101f1bbae244eea966bdecfc263035d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:51,065 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:51,065 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=12, startTime=1733473251015; duration=0sec 2024-12-06T08:20:51,065 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:51,065 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:51,065 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:51,067 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into 614f69c99b504d23a5d05930ce1b7ce1(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:51,067 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:51,067 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=12, startTime=1733473251015; duration=0sec 2024-12-06T08:20:51,068 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:51,068 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:51,068 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:51,068 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:51,068 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:51,069 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/0bfb249e7b4541d6964e602048ecfece, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/e1e275c3fba84a479176c849272b3a1d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/aa028b9f671e4e19916fdc6bb051c92e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/d4961c265d454f5185312da4e51185f5] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=47.8 K 2024-12-06T08:20:51,069 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bfb249e7b4541d6964e602048ecfece, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733473246943 2024-12-06T08:20:51,069 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1e275c3fba84a479176c849272b3a1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733473246974 2024-12-06T08:20:51,070 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa028b9f671e4e19916fdc6bb051c92e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733473247610 2024-12-06T08:20:51,070 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 
{}] compactions.Compactor(224): Compacting d4961c265d454f5185312da4e51185f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733473249747 2024-12-06T08:20:51,088 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#425 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:51,089 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/4aa02e2cadd04318b43572493474431a is 50, key is test_row_0/C:col10/1733473249747/Put/seqid=0 2024-12-06T08:20:51,100 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:51,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:51,101 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:51,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:51,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742326_1502 (size=12629) 2024-12-06T08:20:51,130 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/4aa02e2cadd04318b43572493474431a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4aa02e2cadd04318b43572493474431a 2024-12-06T08:20:51,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/c7bad45f4387422ba53603207e17305f is 50, key is test_row_0/A:col10/1733473249783/Put/seqid=0 2024-12-06T08:20:51,135 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into 4aa02e2cadd04318b43572493474431a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:51,135 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:51,135 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=12, startTime=1733473251015; duration=0sec 2024-12-06T08:20:51,136 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:51,136 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:51,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742327_1503 (size=12151) 2024-12-06T08:20:51,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T08:20:51,581 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/c7bad45f4387422ba53603207e17305f 2024-12-06T08:20:51,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/0b5cd4ac15a6426ca3308eaf04aa3053 is 50, key is test_row_0/B:col10/1733473249783/Put/seqid=0 2024-12-06T08:20:51,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742328_1504 (size=12151) 2024-12-06T08:20:51,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush 
requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:51,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:51,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:51,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473311924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:51,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:51,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473311925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:51,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:51,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473311926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:51,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473311929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:51,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473311930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:51,992 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/0b5cd4ac15a6426ca3308eaf04aa3053 2024-12-06T08:20:51,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/28af1588ec4a4036b915ea11ad6027ee is 50, key is test_row_0/C:col10/1733473249783/Put/seqid=0 2024-12-06T08:20:52,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742329_1505 (size=12151) 2024-12-06T08:20:52,003 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/28af1588ec4a4036b915ea11ad6027ee 2024-12-06T08:20:52,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/c7bad45f4387422ba53603207e17305f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c7bad45f4387422ba53603207e17305f 2024-12-06T08:20:52,010 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c7bad45f4387422ba53603207e17305f, entries=150, sequenceid=218, filesize=11.9 K 2024-12-06T08:20:52,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/0b5cd4ac15a6426ca3308eaf04aa3053 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0b5cd4ac15a6426ca3308eaf04aa3053 2024-12-06T08:20:52,013 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0b5cd4ac15a6426ca3308eaf04aa3053, entries=150, sequenceid=218, filesize=11.9 K 2024-12-06T08:20:52,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/28af1588ec4a4036b915ea11ad6027ee as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/28af1588ec4a4036b915ea11ad6027ee 2024-12-06T08:20:52,017 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/28af1588ec4a4036b915ea11ad6027ee, entries=150, sequenceid=218, filesize=11.9 K 2024-12-06T08:20:52,019 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 8e01d2c9a20bbffd8abe9402655a3d81 in 918ms, sequenceid=218, compaction requested=false 2024-12-06T08:20:52,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:52,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:52,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-06T08:20:52,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-06T08:20:52,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-06T08:20:52,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8360 sec 2024-12-06T08:20:52,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.8390 sec 2024-12-06T08:20:52,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:52,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:20:52,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:52,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:52,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:52,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:52,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:52,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:52,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d4e6d58d620a471abe62a5d7f6fafd9c is 50, key is test_row_0/A:col10/1733473251928/Put/seqid=0 2024-12-06T08:20:52,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742330_1506 (size=14541) 2024-12-06T08:20:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473312073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473312075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473312077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473312178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473312180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473312182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-06T08:20:52,287 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-06T08:20:52,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:52,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-06T08:20:52,290 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:52,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T08:20:52,291 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:52,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:52,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473312383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473312383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T08:20:52,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473312388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:52,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:52,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d4e6d58d620a471abe62a5d7f6fafd9c 2024-12-06T08:20:52,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/46c92a73c22241f2b6716c1aa5a89368 is 50, key is test_row_0/B:col10/1733473251928/Put/seqid=0 2024-12-06T08:20:52,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742331_1507 (size=12151) 2024-12-06T08:20:52,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T08:20:52,595 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:52,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
as already flushing 2024-12-06T08:20:52,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473312688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473312689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473312693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,748 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:52,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:52,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/46c92a73c22241f2b6716c1aa5a89368 2024-12-06T08:20:52,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/c928428a408944878ebfe06e99534f22 is 50, key is test_row_0/C:col10/1733473251928/Put/seqid=0 2024-12-06T08:20:52,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742332_1508 (size=12151) 2024-12-06T08:20:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T08:20:52,901 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:52,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:52,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:52,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:52,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,053 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:53,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:53,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:53,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:53,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:53,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:53,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473313192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:53,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:53,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473313193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:53,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:53,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473313197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:53,206 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:53,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:53,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:53,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/c928428a408944878ebfe06e99534f22 2024-12-06T08:20:53,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d4e6d58d620a471abe62a5d7f6fafd9c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d4e6d58d620a471abe62a5d7f6fafd9c 2024-12-06T08:20:53,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d4e6d58d620a471abe62a5d7f6fafd9c, entries=200, sequenceid=232, filesize=14.2 K 2024-12-06T08:20:53,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/46c92a73c22241f2b6716c1aa5a89368 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/46c92a73c22241f2b6716c1aa5a89368 2024-12-06T08:20:53,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/46c92a73c22241f2b6716c1aa5a89368, entries=150, sequenceid=232, filesize=11.9 K 2024-12-06T08:20:53,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/c928428a408944878ebfe06e99534f22 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c928428a408944878ebfe06e99534f22 2024-12-06T08:20:53,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c928428a408944878ebfe06e99534f22, entries=150, sequenceid=232, filesize=11.9 K 2024-12-06T08:20:53,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1257ms, sequenceid=232, compaction requested=true 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:53,292 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:53,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:53,292 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:53,293 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:53,293 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:53,293 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:53,293 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5101f1bbae244eea966bdecfc263035d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c7bad45f4387422ba53603207e17305f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d4e6d58d620a471abe62a5d7f6fafd9c] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=38.4 K 2024-12-06T08:20:53,293 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:53,293 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:53,293 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:53,294 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/614f69c99b504d23a5d05930ce1b7ce1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0b5cd4ac15a6426ca3308eaf04aa3053, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/46c92a73c22241f2b6716c1aa5a89368] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=36.1 K 2024-12-06T08:20:53,294 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5101f1bbae244eea966bdecfc263035d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733473249747 2024-12-06T08:20:53,294 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 614f69c99b504d23a5d05930ce1b7ce1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733473249747 2024-12-06T08:20:53,294 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7bad45f4387422ba53603207e17305f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733473249776 2024-12-06T08:20:53,295 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b5cd4ac15a6426ca3308eaf04aa3053, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733473249776 2024-12-06T08:20:53,295 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting d4e6d58d620a471abe62a5d7f6fafd9c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733473251921 2024-12-06T08:20:53,295 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 46c92a73c22241f2b6716c1aa5a89368, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733473251921 2024-12-06T08:20:53,303 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#432 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:53,303 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#433 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:53,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/e062e09088e54e8b8e9e09f0e430044f is 50, key is test_row_0/B:col10/1733473251928/Put/seqid=0 2024-12-06T08:20:53,303 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d1a8b7a165fe414dbcdf9aadbd6bb158 is 50, key is test_row_0/A:col10/1733473251928/Put/seqid=0 2024-12-06T08:20:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742334_1510 (size=12731) 2024-12-06T08:20:53,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742333_1509 (size=12731) 2024-12-06T08:20:53,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:53,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:53,360 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:53,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/149791a633884c0189cf76beb0958ac3 is 50, key is test_row_0/A:col10/1733473252069/Put/seqid=0 2024-12-06T08:20:53,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742335_1511 (size=12151) 2024-12-06T08:20:53,370 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/149791a633884c0189cf76beb0958ac3 2024-12-06T08:20:53,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/ea790a77a2c243e6bb7d96eec3d5f9ff is 50, key is test_row_0/B:col10/1733473252069/Put/seqid=0 2024-12-06T08:20:53,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742336_1512 (size=12151) 2024-12-06T08:20:53,382 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=255 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/ea790a77a2c243e6bb7d96eec3d5f9ff 2024-12-06T08:20:53,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/720e78e124d547d8ad79b5f0adac05b2 is 50, key is test_row_0/C:col10/1733473252069/Put/seqid=0 2024-12-06T08:20:53,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742337_1513 (size=12151) 2024-12-06T08:20:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T08:20:53,716 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/e062e09088e54e8b8e9e09f0e430044f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/e062e09088e54e8b8e9e09f0e430044f 2024-12-06T08:20:53,716 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/d1a8b7a165fe414dbcdf9aadbd6bb158 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d1a8b7a165fe414dbcdf9aadbd6bb158 2024-12-06T08:20:53,721 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into d1a8b7a165fe414dbcdf9aadbd6bb158(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:53,721 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:53,721 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=13, startTime=1733473253292; duration=0sec 2024-12-06T08:20:53,721 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:53,721 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:53,721 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:53,722 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into e062e09088e54e8b8e9e09f0e430044f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:53,722 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:53,722 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=13, startTime=1733473253292; duration=0sec 2024-12-06T08:20:53,722 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:53,722 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:53,722 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:53,723 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:53,723 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:53,723 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4aa02e2cadd04318b43572493474431a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/28af1588ec4a4036b915ea11ad6027ee, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c928428a408944878ebfe06e99534f22] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=36.1 K 2024-12-06T08:20:53,723 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4aa02e2cadd04318b43572493474431a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733473249747 2024-12-06T08:20:53,723 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28af1588ec4a4036b915ea11ad6027ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733473249776 2024-12-06T08:20:53,724 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c928428a408944878ebfe06e99534f22, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733473251921 2024-12-06T08:20:53,730 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:53,731 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/50abe4ba20e64e898c46cca5a4b8c1c2 is 50, key is test_row_0/C:col10/1733473251928/Put/seqid=0 2024-12-06T08:20:53,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742338_1514 (size=12731) 2024-12-06T08:20:53,792 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/720e78e124d547d8ad79b5f0adac05b2 2024-12-06T08:20:53,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/149791a633884c0189cf76beb0958ac3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/149791a633884c0189cf76beb0958ac3 2024-12-06T08:20:53,799 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/149791a633884c0189cf76beb0958ac3, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T08:20:53,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/ea790a77a2c243e6bb7d96eec3d5f9ff as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ea790a77a2c243e6bb7d96eec3d5f9ff 2024-12-06T08:20:53,804 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ea790a77a2c243e6bb7d96eec3d5f9ff, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T08:20:53,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/720e78e124d547d8ad79b5f0adac05b2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/720e78e124d547d8ad79b5f0adac05b2 2024-12-06T08:20:53,808 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 
{event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/720e78e124d547d8ad79b5f0adac05b2, entries=150, sequenceid=255, filesize=11.9 K 2024-12-06T08:20:53,808 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 8e01d2c9a20bbffd8abe9402655a3d81 in 448ms, sequenceid=255, compaction requested=false 2024-12-06T08:20:53,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:53,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:53,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-06T08:20:53,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-06T08:20:53,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-06T08:20:53,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5190 sec 2024-12-06T08:20:53,812 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.5220 sec 2024-12-06T08:20:53,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:53,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:53,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:53,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:53,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:53,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:53,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:53,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:53,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/9b1b009b26c443b3b8ea6df85a6992b0 is 50, key is test_row_0/A:col10/1733473253954/Put/seqid=0 2024-12-06T08:20:54,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742339_1515 (size=14741) 2024-12-06T08:20:54,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473314061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473314064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,139 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/50abe4ba20e64e898c46cca5a4b8c1c2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/50abe4ba20e64e898c46cca5a4b8c1c2 2024-12-06T08:20:54,142 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into 50abe4ba20e64e898c46cca5a4b8c1c2(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:54,142 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:54,142 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=13, startTime=1733473253292; duration=0sec 2024-12-06T08:20:54,142 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:54,142 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:54,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473314169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473314170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473314199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473314201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473314207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473314375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473314376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-06T08:20:54,394 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-06T08:20:54,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:54,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-06T08:20:54,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T08:20:54,397 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:54,397 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:54,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:54,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/9b1b009b26c443b3b8ea6df85a6992b0 2024-12-06T08:20:54,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/03e17198aa1f4206b65ac0521d07f3e0 is 50, key is test_row_0/B:col10/1733473253954/Put/seqid=0 2024-12-06T08:20:54,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742340_1516 (size=12301) 
2024-12-06T08:20:54,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T08:20:54,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-06T08:20:54,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:54,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:54,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:54,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:54,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473314678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:54,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473314680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T08:20:54,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-06T08:20:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,705 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:54,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:54,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/03e17198aa1f4206b65ac0521d07f3e0 2024-12-06T08:20:54,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/6749423fbe3a477380085ce96c5f0ff1 is 50, key is test_row_0/C:col10/1733473253954/Put/seqid=0 2024-12-06T08:20:54,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742341_1517 (size=12301) 2024-12-06T08:20:54,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/6749423fbe3a477380085ce96c5f0ff1 2024-12-06T08:20:54,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/9b1b009b26c443b3b8ea6df85a6992b0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9b1b009b26c443b3b8ea6df85a6992b0 2024-12-06T08:20:54,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9b1b009b26c443b3b8ea6df85a6992b0, entries=200, sequenceid=268, filesize=14.4 K 2024-12-06T08:20:54,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/03e17198aa1f4206b65ac0521d07f3e0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/03e17198aa1f4206b65ac0521d07f3e0 2024-12-06T08:20:54,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/03e17198aa1f4206b65ac0521d07f3e0, entries=150, sequenceid=268, filesize=12.0 K 2024-12-06T08:20:54,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/6749423fbe3a477380085ce96c5f0ff1 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/6749423fbe3a477380085ce96c5f0ff1 2024-12-06T08:20:54,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/6749423fbe3a477380085ce96c5f0ff1, entries=150, sequenceid=268, filesize=12.0 K 2024-12-06T08:20:54,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8e01d2c9a20bbffd8abe9402655a3d81 in 856ms, sequenceid=268, compaction requested=true 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:54,848 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:54,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-06T08:20:54,848 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:54,849 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:54,849 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:54,849 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:54,849 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:54,849 INFO 
[RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,849 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,849 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/e062e09088e54e8b8e9e09f0e430044f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ea790a77a2c243e6bb7d96eec3d5f9ff, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/03e17198aa1f4206b65ac0521d07f3e0] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=36.3 K 2024-12-06T08:20:54,849 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d1a8b7a165fe414dbcdf9aadbd6bb158, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/149791a633884c0189cf76beb0958ac3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9b1b009b26c443b3b8ea6df85a6992b0] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=38.7 K 2024-12-06T08:20:54,850 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e062e09088e54e8b8e9e09f0e430044f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733473251921 2024-12-06T08:20:54,850 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1a8b7a165fe414dbcdf9aadbd6bb158, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733473251921 2024-12-06T08:20:54,850 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ea790a77a2c243e6bb7d96eec3d5f9ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473252069 2024-12-06T08:20:54,850 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 149791a633884c0189cf76beb0958ac3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473252069 2024-12-06T08:20:54,851 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 03e17198aa1f4206b65ac0521d07f3e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733473253954 2024-12-06T08:20:54,851 DEBUG 
[RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b1b009b26c443b3b8ea6df85a6992b0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733473253954 2024-12-06T08:20:54,857 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:54,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,858 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:54,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/ba959d6193024aee811f2f3b007c54ac is 50, key is test_row_0/A:col10/1733473254050/Put/seqid=0 2024-12-06T08:20:54,874 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#442 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:54,875 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/96435e0920854346a0f45c338ec77978 is 50, key is test_row_0/A:col10/1733473253954/Put/seqid=0 2024-12-06T08:20:54,877 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:54,878 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/de52c4639cf64a38a2e02caa5b61e619 is 50, key is test_row_0/B:col10/1733473253954/Put/seqid=0 2024-12-06T08:20:54,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742343_1519 (size=12983) 2024-12-06T08:20:54,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742344_1520 (size=12983) 2024-12-06T08:20:54,906 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/96435e0920854346a0f45c338ec77978 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/96435e0920854346a0f45c338ec77978 2024-12-06T08:20:54,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742342_1518 (size=12301) 2024-12-06T08:20:54,910 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into 96435e0920854346a0f45c338ec77978(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:54,910 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:54,910 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=13, startTime=1733473254848; duration=0sec 2024-12-06T08:20:54,911 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:54,911 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:54,911 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:54,913 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:54,913 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:54,914 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:54,914 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/50abe4ba20e64e898c46cca5a4b8c1c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/720e78e124d547d8ad79b5f0adac05b2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/6749423fbe3a477380085ce96c5f0ff1] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=36.3 K 2024-12-06T08:20:54,914 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50abe4ba20e64e898c46cca5a4b8c1c2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733473251921 2024-12-06T08:20:54,914 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 720e78e124d547d8ad79b5f0adac05b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473252069 2024-12-06T08:20:54,915 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6749423fbe3a477380085ce96c5f0ff1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733473253954 2024-12-06T08:20:54,923 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#444 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:54,924 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/c59ea82a5bc8437fb97214db4bd9c31e is 50, key is test_row_0/C:col10/1733473253954/Put/seqid=0 2024-12-06T08:20:54,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742345_1521 (size=12983) 2024-12-06T08:20:54,942 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/c59ea82a5bc8437fb97214db4bd9c31e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c59ea82a5bc8437fb97214db4bd9c31e 2024-12-06T08:20:54,950 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into c59ea82a5bc8437fb97214db4bd9c31e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:54,950 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:54,950 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=13, startTime=1733473254848; duration=0sec 2024-12-06T08:20:54,950 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:54,950 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:54,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T08:20:55,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:55,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:55,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473315198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473315200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,298 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/de52c4639cf64a38a2e02caa5b61e619 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/de52c4639cf64a38a2e02caa5b61e619 2024-12-06T08:20:55,302 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into de52c4639cf64a38a2e02caa5b61e619(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:55,302 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:55,302 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=13, startTime=1733473254848; duration=0sec 2024-12-06T08:20:55,302 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:55,302 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:55,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473315304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473315308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,310 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/ba959d6193024aee811f2f3b007c54ac 2024-12-06T08:20:55,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/1eaa4624f24e4d0e805ff10dc1092e52 is 50, key is test_row_0/B:col10/1733473254050/Put/seqid=0 2024-12-06T08:20:55,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742346_1522 (size=12301) 2024-12-06T08:20:55,322 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/1eaa4624f24e4d0e805ff10dc1092e52 2024-12-06T08:20:55,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/bdac32d2891847f48dfbd41b08899e45 is 50, key is test_row_0/C:col10/1733473254050/Put/seqid=0 2024-12-06T08:20:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742347_1523 (size=12301) 2024-12-06T08:20:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T08:20:55,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473315507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473315510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,734 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/bdac32d2891847f48dfbd41b08899e45 2024-12-06T08:20:55,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/ba959d6193024aee811f2f3b007c54ac as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/ba959d6193024aee811f2f3b007c54ac 2024-12-06T08:20:55,742 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/ba959d6193024aee811f2f3b007c54ac, entries=150, sequenceid=294, filesize=12.0 K 2024-12-06T08:20:55,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/1eaa4624f24e4d0e805ff10dc1092e52 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1eaa4624f24e4d0e805ff10dc1092e52 2024-12-06T08:20:55,746 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1eaa4624f24e4d0e805ff10dc1092e52, entries=150, sequenceid=294, filesize=12.0 K 2024-12-06T08:20:55,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/bdac32d2891847f48dfbd41b08899e45 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/bdac32d2891847f48dfbd41b08899e45 2024-12-06T08:20:55,750 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/bdac32d2891847f48dfbd41b08899e45, entries=150, sequenceid=294, filesize=12.0 K 2024-12-06T08:20:55,751 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8e01d2c9a20bbffd8abe9402655a3d81 in 893ms, sequenceid=294, compaction requested=false 2024-12-06T08:20:55,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:55,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:55,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-06T08:20:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-06T08:20:55,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-06T08:20:55,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3560 sec 2024-12-06T08:20:55,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.3600 sec 2024-12-06T08:20:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:55,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T08:20:55,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:55,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:55,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:55,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:55,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:55,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:55,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/5228f861635c49ae87512d0b3325d30f is 50, key is test_row_0/A:col10/1733473255187/Put/seqid=0 2024-12-06T08:20:55,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742348_1524 (size=14741) 2024-12-06T08:20:55,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/5228f861635c49ae87512d0b3325d30f 2024-12-06T08:20:55,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/403649a7a86645b7b8be31846563b706 is 50, key is test_row_0/B:col10/1733473255187/Put/seqid=0 2024-12-06T08:20:55,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742349_1525 (size=12301) 2024-12-06T08:20:55,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473315878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473315883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473315986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:55,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:55,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473315990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473316189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473316194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40448 deadline: 1733473316217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,219 DEBUG [Thread-2041 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:56,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40498 deadline: 1733473316219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,222 DEBUG [Thread-2043 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:56,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40480 deadline: 1733473316221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,225 DEBUG [Thread-2035 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., hostname=b6b797fc3981,38041,1733473111442, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:20:56,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/403649a7a86645b7b8be31846563b706 2024-12-06T08:20:56,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/119f2c5d869b46a6a190dbf0bf715990 is 50, key is test_row_0/C:col10/1733473255187/Put/seqid=0 2024-12-06T08:20:56,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742350_1526 (size=12301) 2024-12-06T08:20:56,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/119f2c5d869b46a6a190dbf0bf715990 2024-12-06T08:20:56,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/5228f861635c49ae87512d0b3325d30f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5228f861635c49ae87512d0b3325d30f 2024-12-06T08:20:56,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5228f861635c49ae87512d0b3325d30f, entries=200, sequenceid=309, filesize=14.4 K 2024-12-06T08:20:56,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/403649a7a86645b7b8be31846563b706 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/403649a7a86645b7b8be31846563b706 2024-12-06T08:20:56,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/403649a7a86645b7b8be31846563b706, entries=150, sequenceid=309, filesize=12.0 K 2024-12-06T08:20:56,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/119f2c5d869b46a6a190dbf0bf715990 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/119f2c5d869b46a6a190dbf0bf715990 2024-12-06T08:20:56,261 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/119f2c5d869b46a6a190dbf0bf715990, entries=150, sequenceid=309, filesize=12.0 K 2024-12-06T08:20:56,262 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8e01d2c9a20bbffd8abe9402655a3d81 in 448ms, sequenceid=309, compaction requested=true 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:56,262 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:56,263 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:56,263 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:56,263 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:56,263 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:56,263 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:56,263 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
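Note on the events above: the RegionTooBusyException warnings are the memstore back-pressure path. HRegion.checkResources rejects new mutations while the region's memstore is over its 512.0 K blocking limit, and the AcidGuaranteesTestTool writer threads sit in RpcRetryingCallerImpl (tries=6, retries=16 in this log) until the flush just logged brings currentSize back down and the queued compactions are picked up. Below is a minimal Java sketch of that client-side write path; only the table name, row key, and column family/qualifier come from this log, while the connection setup, payload, and explicit backoff loop are illustrative assumptions (the stock client already retries internally and, once retries are exhausted, typically surfaces a RetriesExhaustedException rather than the raw RegionTooBusyException).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape as the keys in this log (test_row_0, family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);   // may be rejected while the region memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          // Application-level backoff instead of (or in addition to) the client's own retries.
          Thread.sleep(100L << Math.min(attempt, 5));
        }
      }
    }
  }
}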
2024-12-06T08:20:56,263 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:56,264 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/de52c4639cf64a38a2e02caa5b61e619, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1eaa4624f24e4d0e805ff10dc1092e52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/403649a7a86645b7b8be31846563b706] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=36.7 K 2024-12-06T08:20:56,264 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/96435e0920854346a0f45c338ec77978, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/ba959d6193024aee811f2f3b007c54ac, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5228f861635c49ae87512d0b3325d30f] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=39.1 K 2024-12-06T08:20:56,264 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting de52c4639cf64a38a2e02caa5b61e619, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733473253954 2024-12-06T08:20:56,264 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96435e0920854346a0f45c338ec77978, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733473253954 2024-12-06T08:20:56,264 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1eaa4624f24e4d0e805ff10dc1092e52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733473254050 2024-12-06T08:20:56,264 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba959d6193024aee811f2f3b007c54ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733473254050 2024-12-06T08:20:56,265 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5228f861635c49ae87512d0b3325d30f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733473255187 2024-12-06T08:20:56,265 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 403649a7a86645b7b8be31846563b706, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733473255187 
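The per-file details above account for the totals reported by HStore(1176): for store A the inputs are 12.7 K + 12.0 K + 14.4 K ≈ 39.1 K (40025 bytes), and for stores B and C they are 12.7 K + 12.0 K + 12.0 K ≈ 36.7 K (37585 bytes). Each compacted output logged below is a single file of 13085 bytes (~12.8 K), roughly the size of one flush, presumably because the three inputs carry successive versions of the same ~150 test rows and the rewrite keeps only the cells that survive version checks.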
2024-12-06T08:20:56,271 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:56,272 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/039b2886ff4c4e588ca2db2ec3c32778 is 50, key is test_row_0/B:col10/1733473255187/Put/seqid=0 2024-12-06T08:20:56,272 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#451 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:56,273 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/118973a6f40b46debe15092f80e205cf is 50, key is test_row_0/A:col10/1733473255187/Put/seqid=0 2024-12-06T08:20:56,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742352_1528 (size=13085) 2024-12-06T08:20:56,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742351_1527 (size=13085) 2024-12-06T08:20:56,282 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/118973a6f40b46debe15092f80e205cf as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/118973a6f40b46debe15092f80e205cf 2024-12-06T08:20:56,283 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/039b2886ff4c4e588ca2db2ec3c32778 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/039b2886ff4c4e588ca2db2ec3c32778 2024-12-06T08:20:56,286 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into 118973a6f40b46debe15092f80e205cf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
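After the commit above, the A store directory should hold only the single compacted HFile (118973a6f40b46debe15092f80e205cf, ~12.8 K); the three inputs are eventually moved aside to the archive directory. The sketch below, assuming the mini-cluster HDFS at localhost:43731 from this log is still running, lists that store directory with the plain Hadoop FileSystem API to confirm this; the class name is illustrative and not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

public class StoreFileLister {
  public static void main(String[] args) throws Exception {
    URI hdfs = URI.create("hdfs://localhost:43731/");
    // Store directory for column family A of the region seen throughout this log.
    Path storeDir = new Path("/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/"
        + "data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A");
    try (FileSystem fs = FileSystem.get(hdfs, new Configuration())) {
      for (FileStatus f : fs.listStatus(storeDir)) {
        // After the compaction above, expect one file of ~12.8 K (13085 bytes).
        System.out.printf("%s\t%d bytes%n", f.getPath().getName(), f.getLen());
      }
    }
  }
}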
2024-12-06T08:20:56,286 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:56,286 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=13, startTime=1733473256262; duration=0sec 2024-12-06T08:20:56,287 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:56,287 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:56,287 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:20:56,287 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into 039b2886ff4c4e588ca2db2ec3c32778(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:56,287 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:56,287 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=13, startTime=1733473256262; duration=0sec 2024-12-06T08:20:56,288 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:56,288 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:56,288 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:20:56,288 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:56,288 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
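Store C now goes through the same selection as A and B once the shortCompactions worker frees up; the "-shortCompactions-" and "-longCompactions-" thread names above are the two CompactSplit pools, whose sizes are configurable. The snippet below is only a sketch of setting those pool sizes on an HBase Configuration; the property names are standard HBase settings, the chosen values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionPoolConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pool backing the "-shortCompactions-" threads seen in this log.
    conf.setInt("hbase.regionserver.thread.compaction.small", 2);
    // Pool backing the "-longCompactions-" threads.
    conf.setInt("hbase.regionserver.thread.compaction.large", 1);
    System.out.println("short pool size = "
        + conf.getInt("hbase.regionserver.thread.compaction.small", 1));
  }
}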
2024-12-06T08:20:56,288 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c59ea82a5bc8437fb97214db4bd9c31e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/bdac32d2891847f48dfbd41b08899e45, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/119f2c5d869b46a6a190dbf0bf715990] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=36.7 K 2024-12-06T08:20:56,289 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c59ea82a5bc8437fb97214db4bd9c31e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733473253954 2024-12-06T08:20:56,289 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdac32d2891847f48dfbd41b08899e45, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733473254050 2024-12-06T08:20:56,289 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 119f2c5d869b46a6a190dbf0bf715990, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733473255187 2024-12-06T08:20:56,301 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#452 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:56,301 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/abde12ac13de430fbea8117be7a66155 is 50, key is test_row_0/C:col10/1733473255187/Put/seqid=0 2024-12-06T08:20:56,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742353_1529 (size=13085) 2024-12-06T08:20:56,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:56,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:20:56,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:56,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:56,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:56,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:56,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:56,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:56,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a737a43753fe46519b0ab3c88b9594c8 is 50, key is test_row_0/A:col10/1733473256495/Put/seqid=0 2024-12-06T08:20:56,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-06T08:20:56,501 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-06T08:20:56,503 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:56,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-06T08:20:56,504 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T08:20:56,505 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:56,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:20:56,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742354_1530 (size=12301) 2024-12-06T08:20:56,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473316512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473316515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T08:20:56,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473316616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473316619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-06T08:20:56,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:56,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:56,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:56,658 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,711 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/abde12ac13de430fbea8117be7a66155 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/abde12ac13de430fbea8117be7a66155 2024-12-06T08:20:56,715 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into abde12ac13de430fbea8117be7a66155(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:20:56,716 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:56,716 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=13, startTime=1733473256262; duration=0sec 2024-12-06T08:20:56,716 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:56,716 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:56,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T08:20:56,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-06T08:20:56,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:56,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
as already flushing 2024-12-06T08:20:56,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:56,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473316821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:56,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473316823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a737a43753fe46519b0ab3c88b9594c8 2024-12-06T08:20:56,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/f65d0df571d0462389af8b0dbeb7e6d9 is 50, key is test_row_0/B:col10/1733473256495/Put/seqid=0 2024-12-06T08:20:56,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742355_1531 (size=12301) 2024-12-06T08:20:56,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/f65d0df571d0462389af8b0dbeb7e6d9 2024-12-06T08:20:56,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/9b7bb15366194e6eae628c08ba873f60 is 50, key is test_row_0/C:col10/1733473256495/Put/seqid=0 2024-12-06T08:20:56,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742356_1532 (size=12301) 2024-12-06T08:20:56,964 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:56,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-06T08:20:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:56,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:56,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:57,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T08:20:57,116 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-06T08:20:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:57,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:57,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:57,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:57,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473317125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:57,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473317131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-06T08:20:57,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:57,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:57,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:57,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:57,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:57,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:57,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/9b7bb15366194e6eae628c08ba873f60 2024-12-06T08:20:57,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/a737a43753fe46519b0ab3c88b9594c8 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a737a43753fe46519b0ab3c88b9594c8 2024-12-06T08:20:57,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a737a43753fe46519b0ab3c88b9594c8, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T08:20:57,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/f65d0df571d0462389af8b0dbeb7e6d9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/f65d0df571d0462389af8b0dbeb7e6d9 2024-12-06T08:20:57,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/f65d0df571d0462389af8b0dbeb7e6d9, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T08:20:57,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/9b7bb15366194e6eae628c08ba873f60 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/9b7bb15366194e6eae628c08ba873f60 2024-12-06T08:20:57,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/9b7bb15366194e6eae628c08ba873f60, entries=150, sequenceid=336, filesize=12.0 K 2024-12-06T08:20:57,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8e01d2c9a20bbffd8abe9402655a3d81 in 870ms, sequenceid=336, compaction requested=false 2024-12-06T08:20:57,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:57,421 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=147 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:57,422 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:57,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/9495a6bde2fa4570badcf5d21174fa1b is 50, key is test_row_0/A:col10/1733473256513/Put/seqid=0 2024-12-06T08:20:57,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742357_1533 (size=12301) 2024-12-06T08:20:57,440 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/9495a6bde2fa4570badcf5d21174fa1b 2024-12-06T08:20:57,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/2b96bd2edbd746c5bd3e3e62d2bdf19e is 50, key is test_row_0/B:col10/1733473256513/Put/seqid=0 2024-12-06T08:20:57,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742358_1534 (size=12301) 2024-12-06T08:20:57,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=146 2024-12-06T08:20:57,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:57,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:57,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473317693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:57,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473317693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:57,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473317800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:57,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473317800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:57,850 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/2b96bd2edbd746c5bd3e3e62d2bdf19e 2024-12-06T08:20:57,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/1afacef486304ebc911a3000d5ecc2e4 is 50, key is test_row_0/C:col10/1733473256513/Put/seqid=0 2024-12-06T08:20:57,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742359_1535 (size=12301) 2024-12-06T08:20:57,860 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/1afacef486304ebc911a3000d5ecc2e4 2024-12-06T08:20:57,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/9495a6bde2fa4570badcf5d21174fa1b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9495a6bde2fa4570badcf5d21174fa1b 2024-12-06T08:20:57,866 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9495a6bde2fa4570badcf5d21174fa1b, entries=150, sequenceid=348, filesize=12.0 K 2024-12-06T08:20:57,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/2b96bd2edbd746c5bd3e3e62d2bdf19e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/2b96bd2edbd746c5bd3e3e62d2bdf19e 2024-12-06T08:20:57,870 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/2b96bd2edbd746c5bd3e3e62d2bdf19e, entries=150, sequenceid=348, filesize=12.0 K 2024-12-06T08:20:57,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/1afacef486304ebc911a3000d5ecc2e4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/1afacef486304ebc911a3000d5ecc2e4 2024-12-06T08:20:57,874 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/1afacef486304ebc911a3000d5ecc2e4, entries=150, sequenceid=348, filesize=12.0 K 2024-12-06T08:20:57,874 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8e01d2c9a20bbffd8abe9402655a3d81 in 452ms, sequenceid=348, compaction requested=true 2024-12-06T08:20:57,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:57,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
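The flush just completed above (sequenceid=348) drains writes that a client has been issuing against the TestAcidGuarantees table's three column families A, B and C. A minimal sketch of such a writer is given below; the table, row, family and qualifier names are taken from the log lines, but the code is only an illustration of the write path, not the actual TestAcidGuarantees load generator.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidWriterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // One row mutated across the three column families seen in the log (A, B, C).
                Put put = new Put(Bytes.toBytes("test_row_0"));
                byte[] value = Bytes.toBytes(42L);
                for (String family : new String[] {"A", "B", "C"}) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
                }
                // Repeated puts like this accumulate in the region's memstore until a
                // flush such as the one logged above writes them out as HFiles.
                table.put(put);
            }
        }
    }
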
2024-12-06T08:20:57,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-06T08:20:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-06T08:20:57,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-06T08:20:57,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3710 sec 2024-12-06T08:20:57,879 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.3760 sec 2024-12-06T08:20:58,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:20:58,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-06T08:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:58,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/f9f702301a0b4804ab27067109d7c6dc is 50, key is test_row_0/A:col10/1733473258005/Put/seqid=0 2024-12-06T08:20:58,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742360_1536 (size=12301) 2024-12-06T08:20:58,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473318022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473318027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473318128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473318134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473318334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473318339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/f9f702301a0b4804ab27067109d7c6dc 2024-12-06T08:20:58,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/3e1139f961074068be4659a03783ccd0 is 50, key is test_row_0/B:col10/1733473258005/Put/seqid=0 2024-12-06T08:20:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742361_1537 (size=12301) 2024-12-06T08:20:58,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-06T08:20:58,608 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-06T08:20:58,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:20:58,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-06T08:20:58,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T08:20:58,612 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:20:58,612 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:20:58,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
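The FlushTableProcedure (pid=148) and its FlushRegionProcedure subprocedure (pid=149) scheduled above are what the master runs when a client asks for an administrative flush; the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed" line is the client side of the same round trip. A minimal sketch of that client call is shown below, assuming a standard HBase 2.x client; the test's own flush trigger may differ.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Submits a flush procedure on the master and waits for it to complete,
                // which is what the "Checking to see if procedure is done pid=..." polling
                // in this log corresponds to on the server side.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
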
2024-12-06T08:20:58,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473318640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473318647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T08:20:58,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-06T08:20:58,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:58,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:58,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:58,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
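The repeated RegionTooBusyException entries in this section show HRegion.checkResources() rejecting puts while the region's memstore sits above its 512 K blocking limit. The stock client normally retries such rejections internally, so the sketch below is only an illustration of explicit handling, assuming automatic retries are exhausted or disabled; the method name, attempt count and pause are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class TooBusyRetrySketch {
        /** Retries a put a few times with a fixed pause when the region rejects it as too busy. */
        static void putWithRetry(Table table, Put put, int maxAttempts, long pauseMs)
                throws IOException, InterruptedException {
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Mirrors the "Over memstore limit=512.0 K" rejections logged above.
                    if (attempt >= maxAttempts) {
                        throw e;
                    }
                    Thread.sleep(pauseMs);
                }
            }
        }
    }

In the log, the client's retries of the same mutation are visible as the steadily increasing callId values (183 through 200) hitting the same region while the flush is still in progress.
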
2024-12-06T08:20:58,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:58,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/3e1139f961074068be4659a03783ccd0 2024-12-06T08:20:58,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5522820522334efc878c887b6fb79a41 is 50, key is test_row_0/C:col10/1733473258005/Put/seqid=0 2024-12-06T08:20:58,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742362_1538 (size=12301) 2024-12-06T08:20:58,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T08:20:58,917 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:58,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-06T08:20:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:58,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:58,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:58,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:59,069 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:59,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-06T08:20:59,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:59,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:59,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:59,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:59,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40478 deadline: 1733473319145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:59,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:20:59,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40472 deadline: 1733473319150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:20:59,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T08:20:59,222 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:59,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-06T08:20:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. as already flushing 2024-12-06T08:20:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:20:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:20:59,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5522820522334efc878c887b6fb79a41 2024-12-06T08:20:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/f9f702301a0b4804ab27067109d7c6dc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f9f702301a0b4804ab27067109d7c6dc 2024-12-06T08:20:59,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f9f702301a0b4804ab27067109d7c6dc, entries=150, sequenceid=374, filesize=12.0 K 2024-12-06T08:20:59,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/3e1139f961074068be4659a03783ccd0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/3e1139f961074068be4659a03783ccd0 2024-12-06T08:20:59,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/3e1139f961074068be4659a03783ccd0, entries=150, sequenceid=374, filesize=12.0 K 2024-12-06T08:20:59,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/5522820522334efc878c887b6fb79a41 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5522820522334efc878c887b6fb79a41 2024-12-06T08:20:59,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5522820522334efc878c887b6fb79a41, entries=150, sequenceid=374, filesize=12.0 K 2024-12-06T08:20:59,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1263ms, sequenceid=374, compaction requested=true 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8e01d2c9a20bbffd8abe9402655a3d81:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:59,269 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e01d2c9a20bbffd8abe9402655a3d81:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:20:59,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:59,269 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:59,270 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:59,270 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:59,270 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/A is initiating minor compaction (all files) 2024-12-06T08:20:59,270 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/B is initiating minor compaction (all files) 2024-12-06T08:20:59,270 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/A in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,270 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/B in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:59,271 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/039b2886ff4c4e588ca2db2ec3c32778, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/f65d0df571d0462389af8b0dbeb7e6d9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/2b96bd2edbd746c5bd3e3e62d2bdf19e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/3e1139f961074068be4659a03783ccd0] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=48.8 K 2024-12-06T08:20:59,271 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/118973a6f40b46debe15092f80e205cf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a737a43753fe46519b0ab3c88b9594c8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9495a6bde2fa4570badcf5d21174fa1b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f9f702301a0b4804ab27067109d7c6dc] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=48.8 K 2024-12-06T08:20:59,271 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 118973a6f40b46debe15092f80e205cf, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733473255187 2024-12-06T08:20:59,271 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 039b2886ff4c4e588ca2db2ec3c32778, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733473255187 2024-12-06T08:20:59,271 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f65d0df571d0462389af8b0dbeb7e6d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733473255877 2024-12-06T08:20:59,271 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a737a43753fe46519b0ab3c88b9594c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733473255877 2024-12-06T08:20:59,271 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b96bd2edbd746c5bd3e3e62d2bdf19e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733473256500 2024-12-06T08:20:59,272 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
9495a6bde2fa4570badcf5d21174fa1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733473256500 2024-12-06T08:20:59,272 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e1139f961074068be4659a03783ccd0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733473257690 2024-12-06T08:20:59,272 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9f702301a0b4804ab27067109d7c6dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733473257690 2024-12-06T08:20:59,282 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#B#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:59,283 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/4fae7d7a32de4e9e9849617aa64f0877 is 50, key is test_row_0/B:col10/1733473258005/Put/seqid=0 2024-12-06T08:20:59,286 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#A#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:59,286 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/6a96176364aa4d248e8e810b6bdd1d93 is 50, key is test_row_0/A:col10/1733473258005/Put/seqid=0 2024-12-06T08:20:59,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742363_1539 (size=13221) 2024-12-06T08:20:59,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742364_1540 (size=13221) 2024-12-06T08:20:59,299 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/4fae7d7a32de4e9e9849617aa64f0877 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/4fae7d7a32de4e9e9849617aa64f0877 2024-12-06T08:20:59,303 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/B of 8e01d2c9a20bbffd8abe9402655a3d81 into 4fae7d7a32de4e9e9849617aa64f0877(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:59,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:59,303 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/B, priority=12, startTime=1733473259269; duration=0sec 2024-12-06T08:20:59,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:20:59,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:B 2024-12-06T08:20:59,303 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:20:59,305 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:20:59,305 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 8e01d2c9a20bbffd8abe9402655a3d81/C is initiating minor compaction (all files) 2024-12-06T08:20:59,305 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e01d2c9a20bbffd8abe9402655a3d81/C in TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,305 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/abde12ac13de430fbea8117be7a66155, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/9b7bb15366194e6eae628c08ba873f60, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/1afacef486304ebc911a3000d5ecc2e4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5522820522334efc878c887b6fb79a41] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp, totalSize=48.8 K 2024-12-06T08:20:59,305 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting abde12ac13de430fbea8117be7a66155, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733473255187 2024-12-06T08:20:59,306 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b7bb15366194e6eae628c08ba873f60, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1733473255877 2024-12-06T08:20:59,306 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1afacef486304ebc911a3000d5ecc2e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=348, earliestPutTs=1733473256500 2024-12-06T08:20:59,306 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5522820522334efc878c887b6fb79a41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733473257690 2024-12-06T08:20:59,314 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e01d2c9a20bbffd8abe9402655a3d81#C#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:20:59,314 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/f60068cf0ac04dc7ba75aed47d0cff6e is 50, key is test_row_0/C:col10/1733473258005/Put/seqid=0 2024-12-06T08:20:59,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742365_1541 (size=13221) 2024-12-06T08:20:59,331 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/f60068cf0ac04dc7ba75aed47d0cff6e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f60068cf0ac04dc7ba75aed47d0cff6e 2024-12-06T08:20:59,336 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/C of 8e01d2c9a20bbffd8abe9402655a3d81 into f60068cf0ac04dc7ba75aed47d0cff6e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:59,336 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:59,336 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/C, priority=12, startTime=1733473259269; duration=0sec 2024-12-06T08:20:59,336 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:59,336 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:C 2024-12-06T08:20:59,375 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:20:59,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-06T08:20:59,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:20:59,376 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-06T08:20:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:20:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:20:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:20:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:20:59,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/fbbc4689d18d4ed7844cc4f4e73dd912 is 50, key is test_row_0/A:col10/1733473258012/Put/seqid=0 2024-12-06T08:20:59,384 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742366_1542 (size=12301) 2024-12-06T08:20:59,384 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/fbbc4689d18d4ed7844cc4f4e73dd912 2024-12-06T08:20:59,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6cce8539aeac4d948585d6653b9a47eb is 50, key is test_row_0/B:col10/1733473258012/Put/seqid=0 2024-12-06T08:20:59,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742367_1543 (size=12301) 2024-12-06T08:20:59,395 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6cce8539aeac4d948585d6653b9a47eb 2024-12-06T08:20:59,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/95e3175e9ff44aa298e8362bb4f5b27f is 50, key is test_row_0/C:col10/1733473258012/Put/seqid=0 2024-12-06T08:20:59,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742368_1544 (size=12301) 2024-12-06T08:20:59,697 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/6a96176364aa4d248e8e810b6bdd1d93 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6a96176364aa4d248e8e810b6bdd1d93 2024-12-06T08:20:59,701 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e01d2c9a20bbffd8abe9402655a3d81/A of 8e01d2c9a20bbffd8abe9402655a3d81 into 6a96176364aa4d248e8e810b6bdd1d93(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:20:59,702 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:59,702 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81., storeName=8e01d2c9a20bbffd8abe9402655a3d81/A, priority=12, startTime=1733473259269; duration=0sec 2024-12-06T08:20:59,702 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:20:59,702 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e01d2c9a20bbffd8abe9402655a3d81:A 2024-12-06T08:20:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T08:20:59,738 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:20:59,807 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/95e3175e9ff44aa298e8362bb4f5b27f 2024-12-06T08:20:59,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/fbbc4689d18d4ed7844cc4f4e73dd912 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/fbbc4689d18d4ed7844cc4f4e73dd912 2024-12-06T08:20:59,815 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/fbbc4689d18d4ed7844cc4f4e73dd912, entries=150, sequenceid=386, filesize=12.0 K 2024-12-06T08:20:59,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/6cce8539aeac4d948585d6653b9a47eb as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6cce8539aeac4d948585d6653b9a47eb 2024-12-06T08:20:59,818 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6cce8539aeac4d948585d6653b9a47eb, entries=150, sequenceid=386, filesize=12.0 K 2024-12-06T08:20:59,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/95e3175e9ff44aa298e8362bb4f5b27f as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/95e3175e9ff44aa298e8362bb4f5b27f 2024-12-06T08:20:59,822 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/95e3175e9ff44aa298e8362bb4f5b27f, entries=150, sequenceid=386, filesize=12.0 K 2024-12-06T08:20:59,823 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 8e01d2c9a20bbffd8abe9402655a3d81 in 448ms, sequenceid=386, compaction requested=false 2024-12-06T08:20:59,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:20:59,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:20:59,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-06T08:20:59,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-06T08:20:59,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-06T08:20:59,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2120 sec 2024-12-06T08:20:59,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.2170 sec 2024-12-06T08:21:00,043 DEBUG [Thread-2048 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:65195 2024-12-06T08:21:00,043 DEBUG [Thread-2048 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,043 DEBUG [Thread-2052 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6050584c to 127.0.0.1:65195 2024-12-06T08:21:00,043 DEBUG [Thread-2052 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,044 DEBUG [Thread-2050 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2070263a to 127.0.0.1:65195 2024-12-06T08:21:00,044 DEBUG [Thread-2050 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,045 DEBUG [Thread-2046 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:65195 2024-12-06T08:21:00,045 DEBUG [Thread-2046 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,045 DEBUG [Thread-2054 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:65195 2024-12-06T08:21:00,045 DEBUG [Thread-2054 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,156 DEBUG [Thread-2039 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65df2359 to 127.0.0.1:65195 2024-12-06T08:21:00,156 DEBUG [Thread-2039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,162 DEBUG [Thread-2037 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63cefe40 to 127.0.0.1:65195 2024-12-06T08:21:00,162 DEBUG [Thread-2037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,227 DEBUG [Thread-2035 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x301741f1 to 127.0.0.1:65195 2024-12-06T08:21:00,227 DEBUG [Thread-2041 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:65195 2024-12-06T08:21:00,227 DEBUG [Thread-2035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,227 DEBUG [Thread-2041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,234 DEBUG [Thread-2043 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:65195 2024-12-06T08:21:00,234 DEBUG [Thread-2043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-06T08:21:00,715 INFO [Thread-2045 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, 
procId: 148 completed 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2773 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8319 rows 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2782 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8346 rows 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2788 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8364 rows 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2779 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8337 rows 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2791 2024-12-06T08:21:00,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8373 rows 2024-12-06T08:21:00,715 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:21:00,715 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cb726fe to 127.0.0.1:65195 2024-12-06T08:21:00,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:00,717 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-06T08:21:00,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-06T08:21:00,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:00,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T08:21:00,721 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473260721"}]},"ts":"1733473260721"} 2024-12-06T08:21:00,722 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-06T08:21:00,724 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-06T08:21:00,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:21:00,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, UNASSIGN}] 2024-12-06T08:21:00,726 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, UNASSIGN 2024-12-06T08:21:00,727 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=8e01d2c9a20bbffd8abe9402655a3d81, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:00,727 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:21:00,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure 8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:21:00,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T08:21:00,879 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:00,879 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:21:00,879 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:21:00,879 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing 8e01d2c9a20bbffd8abe9402655a3d81, disabling compactions & flushes 2024-12-06T08:21:00,879 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:21:00,879 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:21:00,879 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. after waiting 0 ms 2024-12-06T08:21:00,879 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 
2024-12-06T08:21:00,879 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing 8e01d2c9a20bbffd8abe9402655a3d81 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-06T08:21:00,880 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=A 2024-12-06T08:21:00,880 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:00,880 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=B 2024-12-06T08:21:00,880 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:00,880 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e01d2c9a20bbffd8abe9402655a3d81, store=C 2024-12-06T08:21:00,880 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:00,883 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/3dc964401c034fb791f90691395ba074 is 50, key is test_row_0/A:col10/1733473260226/Put/seqid=0 2024-12-06T08:21:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742369_1545 (size=9857) 2024-12-06T08:21:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T08:21:01,287 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/3dc964401c034fb791f90691395ba074 2024-12-06T08:21:01,292 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/ad675f1dff5841d4b43b9729fbbc75a1 is 50, key is test_row_0/B:col10/1733473260226/Put/seqid=0 2024-12-06T08:21:01,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742370_1546 (size=9857) 2024-12-06T08:21:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T08:21:01,695 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 
{event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/ad675f1dff5841d4b43b9729fbbc75a1 2024-12-06T08:21:01,700 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/508bdf51cf1440d59c2b648863fa87de is 50, key is test_row_0/C:col10/1733473260226/Put/seqid=0 2024-12-06T08:21:01,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742371_1547 (size=9857) 2024-12-06T08:21:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T08:21:02,104 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/508bdf51cf1440d59c2b648863fa87de 2024-12-06T08:21:02,107 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/A/3dc964401c034fb791f90691395ba074 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3dc964401c034fb791f90691395ba074 2024-12-06T08:21:02,110 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3dc964401c034fb791f90691395ba074, entries=100, sequenceid=395, filesize=9.6 K 2024-12-06T08:21:02,110 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/B/ad675f1dff5841d4b43b9729fbbc75a1 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ad675f1dff5841d4b43b9729fbbc75a1 2024-12-06T08:21:02,113 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ad675f1dff5841d4b43b9729fbbc75a1, entries=100, sequenceid=395, filesize=9.6 K 2024-12-06T08:21:02,113 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/.tmp/C/508bdf51cf1440d59c2b648863fa87de as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/508bdf51cf1440d59c2b648863fa87de 2024-12-06T08:21:02,119 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/508bdf51cf1440d59c2b648863fa87de, entries=100, sequenceid=395, filesize=9.6 K 2024-12-06T08:21:02,120 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 8e01d2c9a20bbffd8abe9402655a3d81 in 1241ms, sequenceid=395, compaction requested=true 2024-12-06T08:21:02,121 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/4a947f29a88441b8a148743e3ca1c161, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a6247bb980294f9dab1250c5acfcb6c4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/dd923086402d42ae96b247fc77000aaf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/15cdebe030bd450eb6acbf5153061be1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/2296bad74f8c4fc69753a2e269b56800, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/819787ddff6a47de931955217609d15d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f26f361f4cea4103b2d4aec01456b059, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6b571e76a2794facbaf2b49e96d9c497, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a5f1c08d247149438ca9e6ffaea3278a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/b4e3f4dbca0f415883ef9afee011194a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/e47c09e021d248998bfe942db4fb1f32, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d8b144d73bcb45fa9ed7476f9dac166d, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3523437a90f647b4bab2de30aece58fc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c88e6414570c41e0bab20dda04b13970, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5101f1bbae244eea966bdecfc263035d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c7bad45f4387422ba53603207e17305f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d4e6d58d620a471abe62a5d7f6fafd9c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d1a8b7a165fe414dbcdf9aadbd6bb158, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/149791a633884c0189cf76beb0958ac3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9b1b009b26c443b3b8ea6df85a6992b0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/96435e0920854346a0f45c338ec77978, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/ba959d6193024aee811f2f3b007c54ac, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5228f861635c49ae87512d0b3325d30f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/118973a6f40b46debe15092f80e205cf, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a737a43753fe46519b0ab3c88b9594c8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9495a6bde2fa4570badcf5d21174fa1b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f9f702301a0b4804ab27067109d7c6dc] to archive 2024-12-06T08:21:02,122 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
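The entries above show the close-time flush: DefaultStoreFlusher writes each column family's memstore to a temporary HFile under .tmp/, and HRegionFileSystem then commits it into the family directory (A/, B/, C/) before HStore counts it at sequenceid=395. The snippet below is a minimal sketch of that commit step using only the generic Hadoop FileSystem API; the paths echo the log, but the helper (commitFlushedFile) is illustrative and is not the actual HBase implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushSketch {
  /**
   * Illustrative only: move a flushed HFile from the region's .tmp area
   * into the target column-family directory, as the "Committing ..." /
   * "Added ..." entries above do. Real HBase adds validation and retries.
   */
  static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    if (!fs.exists(familyDir) && !fs.mkdirs(familyDir)) {
      throw new IOException("Could not create " + familyDir);
    }
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {      // HDFS rename is atomic within one namespace
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();       // would carry fs.defaultFS=hdfs://localhost:43731
    FileSystem fs = FileSystem.get(conf);
    // Region directory as in the log (test-data prefix shortened here for readability).
    Path region = new Path("/user/jenkins/test-data/.../data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81");
    Path committed = commitFlushedFile(fs,
        new Path(region, ".tmp/A/3dc964401c034fb791f90691395ba074"),
        new Path(region, "A"));
    System.out.println("Committed " + committed);
  }
}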
2024-12-06T08:21:02,123 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/4a947f29a88441b8a148743e3ca1c161 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/4a947f29a88441b8a148743e3ca1c161 2024-12-06T08:21:02,124 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a6247bb980294f9dab1250c5acfcb6c4 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a6247bb980294f9dab1250c5acfcb6c4 2024-12-06T08:21:02,125 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/dd923086402d42ae96b247fc77000aaf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/dd923086402d42ae96b247fc77000aaf 2024-12-06T08:21:02,126 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/15cdebe030bd450eb6acbf5153061be1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/15cdebe030bd450eb6acbf5153061be1 2024-12-06T08:21:02,127 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/2296bad74f8c4fc69753a2e269b56800 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/2296bad74f8c4fc69753a2e269b56800 2024-12-06T08:21:02,128 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/819787ddff6a47de931955217609d15d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/819787ddff6a47de931955217609d15d 2024-12-06T08:21:02,129 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f26f361f4cea4103b2d4aec01456b059 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f26f361f4cea4103b2d4aec01456b059 2024-12-06T08:21:02,129 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6b571e76a2794facbaf2b49e96d9c497 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6b571e76a2794facbaf2b49e96d9c497 2024-12-06T08:21:02,130 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a5f1c08d247149438ca9e6ffaea3278a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a5f1c08d247149438ca9e6ffaea3278a 2024-12-06T08:21:02,131 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/b4e3f4dbca0f415883ef9afee011194a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/b4e3f4dbca0f415883ef9afee011194a 2024-12-06T08:21:02,132 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/e47c09e021d248998bfe942db4fb1f32 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/e47c09e021d248998bfe942db4fb1f32 2024-12-06T08:21:02,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d8b144d73bcb45fa9ed7476f9dac166d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d8b144d73bcb45fa9ed7476f9dac166d 2024-12-06T08:21:02,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3523437a90f647b4bab2de30aece58fc to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3523437a90f647b4bab2de30aece58fc 2024-12-06T08:21:02,134 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c88e6414570c41e0bab20dda04b13970 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c88e6414570c41e0bab20dda04b13970 2024-12-06T08:21:02,135 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5101f1bbae244eea966bdecfc263035d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5101f1bbae244eea966bdecfc263035d 2024-12-06T08:21:02,136 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c7bad45f4387422ba53603207e17305f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/c7bad45f4387422ba53603207e17305f 2024-12-06T08:21:02,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d4e6d58d620a471abe62a5d7f6fafd9c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d4e6d58d620a471abe62a5d7f6fafd9c 2024-12-06T08:21:02,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d1a8b7a165fe414dbcdf9aadbd6bb158 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/d1a8b7a165fe414dbcdf9aadbd6bb158 2024-12-06T08:21:02,138 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/149791a633884c0189cf76beb0958ac3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/149791a633884c0189cf76beb0958ac3 2024-12-06T08:21:02,139 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9b1b009b26c443b3b8ea6df85a6992b0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9b1b009b26c443b3b8ea6df85a6992b0 2024-12-06T08:21:02,140 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/96435e0920854346a0f45c338ec77978 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/96435e0920854346a0f45c338ec77978 2024-12-06T08:21:02,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/ba959d6193024aee811f2f3b007c54ac to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/ba959d6193024aee811f2f3b007c54ac 2024-12-06T08:21:02,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5228f861635c49ae87512d0b3325d30f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/5228f861635c49ae87512d0b3325d30f 2024-12-06T08:21:02,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/118973a6f40b46debe15092f80e205cf to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/118973a6f40b46debe15092f80e205cf 2024-12-06T08:21:02,143 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a737a43753fe46519b0ab3c88b9594c8 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/a737a43753fe46519b0ab3c88b9594c8 2024-12-06T08:21:02,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9495a6bde2fa4570badcf5d21174fa1b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/9495a6bde2fa4570badcf5d21174fa1b 2024-12-06T08:21:02,145 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f9f702301a0b4804ab27067109d7c6dc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/f9f702301a0b4804ab27067109d7c6dc 2024-12-06T08:21:02,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/fe11b321ee8f4bd09873db16afff56d9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0e827d60aac24c69ad41bf0570dd8e51, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/55bf3273536b4800900e9d6e2407737e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/12f501cefe8e4a3a94b73605e03cde00, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/dc49cc07b2364d2e818fe434b7fd9984, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/9072a89636374abc8d8726c0ed96b9c5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/741bca471b3447089d231a0642bfaf0b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/21a4e4dd669c4a6c9f6001a16ccb582c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/b4e2498c0cd54f51abbacd5b34978edb, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/64e565867675445bbc1e1664ab807d4e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/332e98a4961a4acaa34f8ba94b41e6a8, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1da4d7d12b1546b49943f605dec6504d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6130564f7ff6437f972f183148310844, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/614f69c99b504d23a5d05930ce1b7ce1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6ef82f2685af4f99b37d79a74c26cd0a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0b5cd4ac15a6426ca3308eaf04aa3053, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/e062e09088e54e8b8e9e09f0e430044f, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/46c92a73c22241f2b6716c1aa5a89368, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ea790a77a2c243e6bb7d96eec3d5f9ff, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/de52c4639cf64a38a2e02caa5b61e619, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/03e17198aa1f4206b65ac0521d07f3e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1eaa4624f24e4d0e805ff10dc1092e52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/039b2886ff4c4e588ca2db2ec3c32778, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/403649a7a86645b7b8be31846563b706, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/f65d0df571d0462389af8b0dbeb7e6d9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/2b96bd2edbd746c5bd3e3e62d2bdf19e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/3e1139f961074068be4659a03783ccd0] to archive 2024-12-06T08:21:02,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
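Every "Archived from FileableStoreFile, <data path> to <archive path>" pair above follows one rule: the store file keeps its namespace/table/region/family/name, and only the root switches from <rootDir>/data/... to <rootDir>/archive/data/.... Below is a hedged sketch of that mapping plus the move, again written against the plain FileSystem API rather than HBase's HFileArchiver.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {
  /**
   * Map <rootDir>/data/<ns>/<table>/<region>/<family>/<file>
   * to  <rootDir>/archive/data/<ns>/<table>/<region>/<family>/<file>,
   * mirroring the source/destination pairs in the log.
   * Assumes storeFile really lives under "<rootDir>/data/".
   */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();          // e.g. /user/jenkins/test-data/c6bcd02f-...
    String file = storeFile.toUri().getPath();
    String relative = file.substring(root.length());  // "/data/default/TestAcidGuarantees/..."
    return new Path(rootDir, "archive" + relative);
  }

  /** Move every compacted store file into the archive, creating parent directories as needed. */
  static void archiveAll(FileSystem fs, Path rootDir, List<Path> storeFiles) throws IOException {
    for (Path src : storeFiles) {
      Path dst = toArchivePath(rootDir, src);
      fs.mkdirs(dst.getParent());
      if (!fs.rename(src, dst)) {
        throw new IOException("Failed to archive " + src + " to " + dst);
      }
    }
  }
}

With rootDir set to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156, toArchivePath reproduces the destination shown for each source file in the entries above.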
2024-12-06T08:21:02,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/fe11b321ee8f4bd09873db16afff56d9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/fe11b321ee8f4bd09873db16afff56d9 2024-12-06T08:21:02,148 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0e827d60aac24c69ad41bf0570dd8e51 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0e827d60aac24c69ad41bf0570dd8e51 2024-12-06T08:21:02,149 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/55bf3273536b4800900e9d6e2407737e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/55bf3273536b4800900e9d6e2407737e 2024-12-06T08:21:02,150 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/12f501cefe8e4a3a94b73605e03cde00 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/12f501cefe8e4a3a94b73605e03cde00 2024-12-06T08:21:02,151 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/dc49cc07b2364d2e818fe434b7fd9984 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/dc49cc07b2364d2e818fe434b7fd9984 2024-12-06T08:21:02,151 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/9072a89636374abc8d8726c0ed96b9c5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/9072a89636374abc8d8726c0ed96b9c5 2024-12-06T08:21:02,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/741bca471b3447089d231a0642bfaf0b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/741bca471b3447089d231a0642bfaf0b 2024-12-06T08:21:02,153 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/21a4e4dd669c4a6c9f6001a16ccb582c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/21a4e4dd669c4a6c9f6001a16ccb582c 2024-12-06T08:21:02,154 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/b4e2498c0cd54f51abbacd5b34978edb to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/b4e2498c0cd54f51abbacd5b34978edb 2024-12-06T08:21:02,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/64e565867675445bbc1e1664ab807d4e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/64e565867675445bbc1e1664ab807d4e 2024-12-06T08:21:02,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/332e98a4961a4acaa34f8ba94b41e6a8 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/332e98a4961a4acaa34f8ba94b41e6a8 2024-12-06T08:21:02,156 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1da4d7d12b1546b49943f605dec6504d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1da4d7d12b1546b49943f605dec6504d 2024-12-06T08:21:02,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6130564f7ff6437f972f183148310844 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6130564f7ff6437f972f183148310844 2024-12-06T08:21:02,158 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/614f69c99b504d23a5d05930ce1b7ce1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/614f69c99b504d23a5d05930ce1b7ce1 2024-12-06T08:21:02,158 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6ef82f2685af4f99b37d79a74c26cd0a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6ef82f2685af4f99b37d79a74c26cd0a 2024-12-06T08:21:02,159 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0b5cd4ac15a6426ca3308eaf04aa3053 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/0b5cd4ac15a6426ca3308eaf04aa3053 2024-12-06T08:21:02,160 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/e062e09088e54e8b8e9e09f0e430044f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/e062e09088e54e8b8e9e09f0e430044f 2024-12-06T08:21:02,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/46c92a73c22241f2b6716c1aa5a89368 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/46c92a73c22241f2b6716c1aa5a89368 2024-12-06T08:21:02,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ea790a77a2c243e6bb7d96eec3d5f9ff to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ea790a77a2c243e6bb7d96eec3d5f9ff 2024-12-06T08:21:02,162 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/de52c4639cf64a38a2e02caa5b61e619 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/de52c4639cf64a38a2e02caa5b61e619 2024-12-06T08:21:02,163 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/03e17198aa1f4206b65ac0521d07f3e0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/03e17198aa1f4206b65ac0521d07f3e0 2024-12-06T08:21:02,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1eaa4624f24e4d0e805ff10dc1092e52 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/1eaa4624f24e4d0e805ff10dc1092e52 2024-12-06T08:21:02,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/039b2886ff4c4e588ca2db2ec3c32778 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/039b2886ff4c4e588ca2db2ec3c32778 2024-12-06T08:21:02,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/403649a7a86645b7b8be31846563b706 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/403649a7a86645b7b8be31846563b706 2024-12-06T08:21:02,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/f65d0df571d0462389af8b0dbeb7e6d9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/f65d0df571d0462389af8b0dbeb7e6d9 2024-12-06T08:21:02,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/2b96bd2edbd746c5bd3e3e62d2bdf19e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/2b96bd2edbd746c5bd3e3e62d2bdf19e 2024-12-06T08:21:02,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/3e1139f961074068be4659a03783ccd0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/3e1139f961074068be4659a03783ccd0 2024-12-06T08:21:02,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d89a23ced0f4d448bcfdf72218ee6f9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/254d1b5e9ea846978e351bb1e1ddf08d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5c0a522866554b8486bedf1659d743f1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/78528288789848c48dec22c1c3efc51a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/b90c210757844153b172c081a196b8b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d3dc5a033ae4a02b64acfea1d1774b7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4c7f99b6669741c2ae66e40c3a1463b2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5707d70e73ea4a239c8b6358cef749f3, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f333d8035f744e4191d08daedc979b29, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/0bfb249e7b4541d6964e602048ecfece, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5876dd32c64c477d843c2ef62ef1ffbd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/e1e275c3fba84a479176c849272b3a1d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/aa028b9f671e4e19916fdc6bb051c92e, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4aa02e2cadd04318b43572493474431a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/d4961c265d454f5185312da4e51185f5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/28af1588ec4a4036b915ea11ad6027ee, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/50abe4ba20e64e898c46cca5a4b8c1c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c928428a408944878ebfe06e99534f22, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/720e78e124d547d8ad79b5f0adac05b2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c59ea82a5bc8437fb97214db4bd9c31e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/6749423fbe3a477380085ce96c5f0ff1, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/bdac32d2891847f48dfbd41b08899e45, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/abde12ac13de430fbea8117be7a66155, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/119f2c5d869b46a6a190dbf0bf715990, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/9b7bb15366194e6eae628c08ba873f60, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/1afacef486304ebc911a3000d5ecc2e4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5522820522334efc878c887b6fb79a41] to archive 2024-12-06T08:21:02,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
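The same batch archiving now repeats for family C. When reviewing a run like this, it can help to tally the HFileArchiver moves per column family straight from the log. The helper below is a reader-side convenience, not part of HBase; it only assumes the ".../<region>/<single-letter family>/<hex file> to hdfs://..." shape seen in this log, and entries that happen to be split across physical lines are not counted.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;

public class ArchiveLogTally {
  // Matches: Archived from FileableStoreFile, hdfs://.../<region>/<family>/<hexfile> to ...
  private static final Pattern ARCHIVED =
      Pattern.compile("Archived from FileableStoreFile, \\S+/([A-Z])/[0-9a-f]+ to ");

  public static void main(String[] args) throws IOException {
    Map<String, Integer> perFamily = new TreeMap<>();
    try (Stream<String> lines = Files.lines(Paths.get(args[0]))) {
      lines.forEach(line -> {
        Matcher m = ARCHIVED.matcher(line);
        while (m.find()) {                 // several log entries may share one physical line
          perFamily.merge(m.group(1), 1, Integer::sum);
        }
      });
    }
    perFamily.forEach((family, count) ->
        System.out.println("family " + family + ": " + count + " store files archived"));
  }
}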
2024-12-06T08:21:02,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d89a23ced0f4d448bcfdf72218ee6f9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d89a23ced0f4d448bcfdf72218ee6f9 2024-12-06T08:21:02,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/254d1b5e9ea846978e351bb1e1ddf08d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/254d1b5e9ea846978e351bb1e1ddf08d 2024-12-06T08:21:02,173 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5c0a522866554b8486bedf1659d743f1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5c0a522866554b8486bedf1659d743f1 2024-12-06T08:21:02,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/78528288789848c48dec22c1c3efc51a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/78528288789848c48dec22c1c3efc51a 2024-12-06T08:21:02,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/b90c210757844153b172c081a196b8b9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/b90c210757844153b172c081a196b8b9 2024-12-06T08:21:02,175 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d3dc5a033ae4a02b64acfea1d1774b7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/7d3dc5a033ae4a02b64acfea1d1774b7 2024-12-06T08:21:02,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4c7f99b6669741c2ae66e40c3a1463b2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4c7f99b6669741c2ae66e40c3a1463b2 2024-12-06T08:21:02,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5707d70e73ea4a239c8b6358cef749f3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5707d70e73ea4a239c8b6358cef749f3 2024-12-06T08:21:02,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f333d8035f744e4191d08daedc979b29 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f333d8035f744e4191d08daedc979b29 2024-12-06T08:21:02,178 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/0bfb249e7b4541d6964e602048ecfece to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/0bfb249e7b4541d6964e602048ecfece 2024-12-06T08:21:02,179 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5876dd32c64c477d843c2ef62ef1ffbd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5876dd32c64c477d843c2ef62ef1ffbd 2024-12-06T08:21:02,179 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/e1e275c3fba84a479176c849272b3a1d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/e1e275c3fba84a479176c849272b3a1d 2024-12-06T08:21:02,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/aa028b9f671e4e19916fdc6bb051c92e to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/aa028b9f671e4e19916fdc6bb051c92e 2024-12-06T08:21:02,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4aa02e2cadd04318b43572493474431a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/4aa02e2cadd04318b43572493474431a 2024-12-06T08:21:02,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/d4961c265d454f5185312da4e51185f5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/d4961c265d454f5185312da4e51185f5 2024-12-06T08:21:02,182 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/28af1588ec4a4036b915ea11ad6027ee to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/28af1588ec4a4036b915ea11ad6027ee 2024-12-06T08:21:02,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/50abe4ba20e64e898c46cca5a4b8c1c2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/50abe4ba20e64e898c46cca5a4b8c1c2 2024-12-06T08:21:02,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c928428a408944878ebfe06e99534f22 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c928428a408944878ebfe06e99534f22 2024-12-06T08:21:02,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/720e78e124d547d8ad79b5f0adac05b2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/720e78e124d547d8ad79b5f0adac05b2 2024-12-06T08:21:02,185 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c59ea82a5bc8437fb97214db4bd9c31e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/c59ea82a5bc8437fb97214db4bd9c31e 2024-12-06T08:21:02,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/6749423fbe3a477380085ce96c5f0ff1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/6749423fbe3a477380085ce96c5f0ff1 2024-12-06T08:21:02,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/bdac32d2891847f48dfbd41b08899e45 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/bdac32d2891847f48dfbd41b08899e45 2024-12-06T08:21:02,187 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/abde12ac13de430fbea8117be7a66155 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/abde12ac13de430fbea8117be7a66155 2024-12-06T08:21:02,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/119f2c5d869b46a6a190dbf0bf715990 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/119f2c5d869b46a6a190dbf0bf715990 2024-12-06T08:21:02,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/9b7bb15366194e6eae628c08ba873f60 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/9b7bb15366194e6eae628c08ba873f60 2024-12-06T08:21:02,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/1afacef486304ebc911a3000d5ecc2e4 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/1afacef486304ebc911a3000d5ecc2e4 2024-12-06T08:21:02,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5522820522334efc878c887b6fb79a41 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/5522820522334efc878c887b6fb79a41 2024-12-06T08:21:02,193 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/recovered.edits/398.seqid, newMaxSeqId=398, maxSeqId=1 2024-12-06T08:21:02,194 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81. 2024-12-06T08:21:02,194 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for 8e01d2c9a20bbffd8abe9402655a3d81: 2024-12-06T08:21:02,195 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed 8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:21:02,196 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=8e01d2c9a20bbffd8abe9402655a3d81, regionState=CLOSED 2024-12-06T08:21:02,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-06T08:21:02,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure 8e01d2c9a20bbffd8abe9402655a3d81, server=b6b797fc3981,38041,1733473111442 in 1.4680 sec 2024-12-06T08:21:02,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-06T08:21:02,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e01d2c9a20bbffd8abe9402655a3d81, UNASSIGN in 1.4710 sec 2024-12-06T08:21:02,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-06T08:21:02,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4730 sec 2024-12-06T08:21:02,200 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473262200"}]},"ts":"1733473262200"} 2024-12-06T08:21:02,201 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 
2024-12-06T08:21:02,203 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T08:21:02,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4850 sec 2024-12-06T08:21:02,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-06T08:21:02,825 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-06T08:21:02,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T08:21:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,826 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-06T08:21:02,827 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,828 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:21:02,830 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/recovered.edits] 2024-12-06T08:21:02,832 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3dc964401c034fb791f90691395ba074 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/3dc964401c034fb791f90691395ba074 2024-12-06T08:21:02,833 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6a96176364aa4d248e8e810b6bdd1d93 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/6a96176364aa4d248e8e810b6bdd1d93 2024-12-06T08:21:02,834 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/fbbc4689d18d4ed7844cc4f4e73dd912 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/A/fbbc4689d18d4ed7844cc4f4e73dd912 2024-12-06T08:21:02,835 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/4fae7d7a32de4e9e9849617aa64f0877 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/4fae7d7a32de4e9e9849617aa64f0877 2024-12-06T08:21:02,836 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6cce8539aeac4d948585d6653b9a47eb to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/6cce8539aeac4d948585d6653b9a47eb 2024-12-06T08:21:02,838 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ad675f1dff5841d4b43b9729fbbc75a1 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/B/ad675f1dff5841d4b43b9729fbbc75a1 2024-12-06T08:21:02,840 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/508bdf51cf1440d59c2b648863fa87de to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/508bdf51cf1440d59c2b648863fa87de 2024-12-06T08:21:02,841 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/95e3175e9ff44aa298e8362bb4f5b27f to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/95e3175e9ff44aa298e8362bb4f5b27f 2024-12-06T08:21:02,842 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f60068cf0ac04dc7ba75aed47d0cff6e to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/C/f60068cf0ac04dc7ba75aed47d0cff6e 2024-12-06T08:21:02,844 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/recovered.edits/398.seqid to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81/recovered.edits/398.seqid 2024-12-06T08:21:02,845 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/8e01d2c9a20bbffd8abe9402655a3d81 2024-12-06T08:21:02,845 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T08:21:02,846 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,848 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T08:21:02,849 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T08:21:02,850 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,850 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T08:21:02,850 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733473262850"}]},"ts":"9223372036854775807"} 2024-12-06T08:21:02,851 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T08:21:02,851 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8e01d2c9a20bbffd8abe9402655a3d81, NAME => 'TestAcidGuarantees,,1733473237881.8e01d2c9a20bbffd8abe9402655a3d81.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T08:21:02,852 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-06T08:21:02,852 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733473262852"}]},"ts":"9223372036854775807"} 2024-12-06T08:21:02,853 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T08:21:02,855 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 30 msec 2024-12-06T08:21:02,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-06T08:21:02,928 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-06T08:21:02,937 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=450 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=432 (was 394) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 9), AvailableMemoryMB=8465 (was 8484) 2024-12-06T08:21:02,945 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=432, ProcessCount=9, AvailableMemoryMB=8464 2024-12-06T08:21:02,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
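[Editorial note] Between tests the harness tears the table down: the DisableTableProcedure (pid=150) and DeleteTableProcedure (pid=154) completed above are the server-side counterparts of a plain disable-then-delete issued by the client. As an illustrative sketch only -- not the actual test code; the table name is taken from the log, everything else is assumed -- the equivalent HBase 2.x Admin calls would be roughly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; this pair of calls
          // is what drives the DisableTableProcedure / DeleteTableProcedure seen above.
          if (admin.tableExists(table)) {
            admin.disableTable(table);
            admin.deleteTable(table);
          }
        }
      }
    }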
2024-12-06T08:21:02,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:21:02,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:02,948 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:21:02,948 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:02,948 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-12-06T08:21:02,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T08:21:02,948 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:21:02,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742372_1548 (size=963) 2024-12-06T08:21:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T08:21:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T08:21:03,355 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156 2024-12-06T08:21:03,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742373_1549 (size=53) 2024-12-06T08:21:03,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T08:21:03,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:21:03,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 4407da04b0ac162131052d3fbc4bef2c, disabling compactions & flushes 2024-12-06T08:21:03,760 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:03,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:03,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. after waiting 0 ms 2024-12-06T08:21:03,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:03,760 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:03,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:03,761 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:21:03,761 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733473263761"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473263761"}]},"ts":"1733473263761"} 2024-12-06T08:21:03,762 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:21:03,763 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:21:03,763 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473263763"}]},"ts":"1733473263763"} 2024-12-06T08:21:03,764 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-06T08:21:03,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, ASSIGN}] 2024-12-06T08:21:03,768 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, ASSIGN 2024-12-06T08:21:03,768 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, ASSIGN; state=OFFLINE, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=false 2024-12-06T08:21:03,919 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:03,920 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:21:04,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T08:21:04,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:04,074 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:04,074 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:21:04,074 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,074 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:21:04,074 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,074 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,075 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,076 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:21:04,076 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4407da04b0ac162131052d3fbc4bef2c columnFamilyName A 2024-12-06T08:21:04,076 DEBUG [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:04,077 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(327): Store=4407da04b0ac162131052d3fbc4bef2c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:21:04,077 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,078 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:21:04,078 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4407da04b0ac162131052d3fbc4bef2c columnFamilyName B 2024-12-06T08:21:04,078 DEBUG [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:04,078 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(327): Store=4407da04b0ac162131052d3fbc4bef2c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:21:04,078 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,079 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:21:04,079 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4407da04b0ac162131052d3fbc4bef2c columnFamilyName C 2024-12-06T08:21:04,079 DEBUG [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:04,079 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(327): Store=4407da04b0ac162131052d3fbc4bef2c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:21:04,079 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:04,080 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,080 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,081 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:21:04,082 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:04,083 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:21:04,083 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened 4407da04b0ac162131052d3fbc4bef2c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75208317, jitterRate=0.12069125473499298}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:21:04,084 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:04,085 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., pid=157, masterSystemTime=1733473264071 2024-12-06T08:21:04,086 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:04,086 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
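[Editorial note] The create request logged at 08:21:02,946 asks for the table metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three column families A, B and C with VERSIONS => '1', which is why each store opened above is instantiated as a CompactingMemStore with compactor=ADAPTIVE. A hedged sketch of how such a descriptor might be built with the HBase 2.x client API -- the helper name and the Admin handle are assumptions, not taken from the test source:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Descriptor mirroring the create request in the log: table-level metadata
    // selecting the ADAPTIVE in-memory compaction policy, plus three
    // single-version column families (other attributes left at their defaults).
    static void createTestTable(Admin admin) throws IOException {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build());
      }
      admin.createTable(builder.build()); // drives a CreateTableProcedure like pid=155 above
    }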
2024-12-06T08:21:04,086 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:04,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-06T08:21:04,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 in 167 msec 2024-12-06T08:21:04,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-06T08:21:04,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, ASSIGN in 321 msec 2024-12-06T08:21:04,090 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:21:04,090 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473264090"}]},"ts":"1733473264090"} 2024-12-06T08:21:04,091 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-06T08:21:04,093 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:21:04,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-12-06T08:21:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-06T08:21:05,052 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-06T08:21:05,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x593af048 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cbd2497 2024-12-06T08:21:05,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17e5a47d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,061 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:05,062 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37522, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:05,063 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:21:05,064 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42380, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:21:05,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-06T08:21:05,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:21:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742374_1550 (size=999) 2024-12-06T08:21:05,477 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-06T08:21:05,477 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-06T08:21:05,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-06T08:21:05,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, REOPEN/MOVE}] 2024-12-06T08:21:05,480 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, REOPEN/MOVE 2024-12-06T08:21:05,481 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:05,482 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:21:05,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:21:05,633 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:05,633 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,633 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:21:05,633 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing 4407da04b0ac162131052d3fbc4bef2c, disabling compactions & flushes 2024-12-06T08:21:05,633 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:05,633 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:05,633 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. after waiting 0 ms 2024-12-06T08:21:05,633 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:05,637 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-06T08:21:05,637 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:05,637 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:05,637 WARN [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: 4407da04b0ac162131052d3fbc4bef2c to self. 2024-12-06T08:21:05,638 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,639 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=CLOSED 2024-12-06T08:21:05,640 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-06T08:21:05,640 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 in 157 msec 2024-12-06T08:21:05,641 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, REOPEN/MOVE; state=CLOSED, location=b6b797fc3981,38041,1733473111442; forceNewPlan=false, retain=true 2024-12-06T08:21:05,791 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=OPENING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:05,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:21:05,944 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:05,946 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:05,946 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:21:05,947 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,947 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:21:05,947 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,947 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,948 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,948 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:21:05,949 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4407da04b0ac162131052d3fbc4bef2c columnFamilyName A 2024-12-06T08:21:05,949 DEBUG [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:05,950 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(327): Store=4407da04b0ac162131052d3fbc4bef2c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:21:05,950 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,951 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:21:05,951 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4407da04b0ac162131052d3fbc4bef2c columnFamilyName B 2024-12-06T08:21:05,951 DEBUG [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:05,951 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(327): Store=4407da04b0ac162131052d3fbc4bef2c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:21:05,951 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,952 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-06T08:21:05,952 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4407da04b0ac162131052d3fbc4bef2c columnFamilyName C 2024-12-06T08:21:05,952 DEBUG [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:05,952 INFO [StoreOpener-4407da04b0ac162131052d3fbc4bef2c-1 {}] regionserver.HStore(327): Store=4407da04b0ac162131052d3fbc4bef2c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:21:05,952 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:05,953 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,953 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,954 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:21:05,955 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:05,956 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened 4407da04b0ac162131052d3fbc4bef2c; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58807724, jitterRate=-0.12369662523269653}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:21:05,956 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:05,957 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., pid=162, masterSystemTime=1733473265944 2024-12-06T08:21:05,958 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:05,958 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
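For orientation, the store setup logged above (three column families A, B and C, CompactingMemStore with compactor=ADAPTIVE, and FlushLargeStoresPolicy falling back to memStoreFlushSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the descriptor) corresponds to a table definition roughly like the sketch below. This is an illustrative use of the public HBase 2.x client API, not the test's actual setup code; the MOB flag is only inferred from the mobdir flush that appears later in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidGuaranteesLikeTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // If left unset, FlushLargeStoresPolicy falls back to
              // memstore flush size / number of families, as logged above (16.0 M).
              .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                        String.valueOf(16 * 1024 * 1024));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // Matches "compactor=ADAPTIVE" reported by CompactingMemStore above.
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                // Inferred from the HMobStore/mobdir flush later in this log.
                .setMobEnabled(true)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}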
2024-12-06T08:21:05,958 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=OPEN, openSeqNum=5, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:05,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-12-06T08:21:05,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 in 167 msec 2024-12-06T08:21:05,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-06T08:21:05,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, REOPEN/MOVE in 480 msec 2024-12-06T08:21:05,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-06T08:21:05,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 482 msec 2024-12-06T08:21:05,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 897 msec 2024-12-06T08:21:05,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-06T08:21:05,965 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2209c520 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5765d46a 2024-12-06T08:21:05,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d9954b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,969 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x537a66f8 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ac53e79 2024-12-06T08:21:05,973 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5efb7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,974 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-12-06T08:21:05,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,981 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e 
to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-12-06T08:21:05,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,986 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-12-06T08:21:05,989 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,989 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69abefea to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b914bf4 2024-12-06T08:21:05,994 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@91d72db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,994 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e757135 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f6a59e4 2024-12-06T08:21:05,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d836f78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:05,998 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-12-06T08:21:06,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:06,002 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-12-06T08:21:06,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:06,011 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:65195 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-12-06T08:21:06,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:21:06,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-06T08:21:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T08:21:06,021 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:06,022 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:06,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:06,024 DEBUG [hconnection-0x69e48acd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,025 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,028 DEBUG [hconnection-0x32f24f6e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,028 DEBUG [hconnection-0x1c902d8f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,029 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,029 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,031 DEBUG [hconnection-0x66d9b5b3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,032 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,032 DEBUG [hconnection-0x282b7e62-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,033 DEBUG [hconnection-0x5261857b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,033 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37566, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,034 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,034 DEBUG [hconnection-0x4507b384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,035 DEBUG [hconnection-0x63cb45fb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,035 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,036 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:21:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:06,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:06,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:06,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:06,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:06,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:06,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:06,048 DEBUG [hconnection-0x6d8ebdc9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,048 DEBUG [hconnection-0x231f2405-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:21:06,049 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37604, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,050 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:21:06,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473326061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473326062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473326062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473326063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473326063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206dd1aa6d608644fc9af5e5359808945ce_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473266036/Put/seqid=0 2024-12-06T08:21:06,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742375_1551 (size=12154) 2024-12-06T08:21:06,098 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:06,101 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206dd1aa6d608644fc9af5e5359808945ce_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206dd1aa6d608644fc9af5e5359808945ce_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:06,102 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab007ed019fd43898977a21bd1debc66, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:06,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab007ed019fd43898977a21bd1debc66 is 175, key is test_row_0/A:col10/1733473266036/Put/seqid=0 2024-12-06T08:21:06,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742376_1552 (size=30955) 2024-12-06T08:21:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T08:21:06,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473326165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473326165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473326165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473326165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473326166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,173 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:06,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
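The repeated RegionTooBusyException warnings above come from HRegion.checkResources: once a region's memstore grows past its blocking size, which is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, new Mutate calls are rejected until the in-flight flush completes. The "Over memstore limit=512.0 K" figure indicates this test runs with a deliberately tiny flush size. A minimal sketch of the two standard knobs involved follows, with illustrative values that would reproduce that limit (this is generic hbase-site configuration, not something read out of this test).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Example values: a 128 KB flush size with the default multiplier of 4
    // would yield the 512 K blocking limit reported in the warnings above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once the region memstore exceeds " + blocking + " bytes");
  }
}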
2024-12-06T08:21:06,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:06,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
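The flush being driven here was requested by the client earlier in the log (master.HMaster$22: Client=jenkins flush TestAcidGuarantees, stored as FlushTableProcedure pid=163 with FlushRegionProcedure pid=164 as its subprocedure). A minimal sketch of issuing the same request through the public Admin API is shown below; only the table name is taken from the log. As the surrounding records show, the region server answers "NOT flushing ... as already flushing" while the MemStoreFlusher-initiated flush is still running, the remote callable reports "Unable to complete flush", and the master keeps re-dispatching pid=164.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Issues the same table-level flush that shows up above as FlushTableProcedure pid=163.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}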
2024-12-06T08:21:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T08:21:06,326 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:06,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
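On the write path, RegionTooBusyException is a transient, retryable condition; a normally configured HBase client backs off and retries these Mutate calls on its own (governed by settings such as hbase.client.retries.number and hbase.client.pause), which is why the same connections keep reappearing above with new callIds and later deadlines. Purely for illustration, and assuming the exception actually reaches the caller (for example when client retries are turned down, as this test appears to do), a hand-rolled equivalent could look like the following; the row, column, retry cap and sleep are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // may be rejected while the region memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) throw e;   // arbitrary cap for the sketch
          Thread.sleep(100L * attempt); // simple linear backoff, also arbitrary
        }
      }
    }
  }
}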
2024-12-06T08:21:06,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:06,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473326367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473326367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473326367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473326367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473326368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,479 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:06,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:06,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,508 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab007ed019fd43898977a21bd1debc66 2024-12-06T08:21:06,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ec8222fc8d8d4d3f8ff5a093a46f4e47 is 50, key is test_row_0/B:col10/1733473266036/Put/seqid=0 2024-12-06T08:21:06,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742377_1553 (size=12001) 2024-12-06T08:21:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T08:21:06,632 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:06,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
as already flushing 2024-12-06T08:21:06,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473326670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473326671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473326671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473326671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473326672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,784 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,937 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:06,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:06,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:06,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:06,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:06,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ec8222fc8d8d4d3f8ff5a093a46f4e47 2024-12-06T08:21:06,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ed66a15171e04ce1a7696cd51a24de91 is 50, key is test_row_0/C:col10/1733473266036/Put/seqid=0 2024-12-06T08:21:06,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742378_1554 (size=12001) 2024-12-06T08:21:06,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ed66a15171e04ce1a7696cd51a24de91 2024-12-06T08:21:06,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab007ed019fd43898977a21bd1debc66 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66 2024-12-06T08:21:06,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66, entries=150, sequenceid=16, filesize=30.2 K 2024-12-06T08:21:06,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ec8222fc8d8d4d3f8ff5a093a46f4e47 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ec8222fc8d8d4d3f8ff5a093a46f4e47 2024-12-06T08:21:06,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ec8222fc8d8d4d3f8ff5a093a46f4e47, entries=150, sequenceid=16, filesize=11.7 K 2024-12-06T08:21:06,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ed66a15171e04ce1a7696cd51a24de91 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ed66a15171e04ce1a7696cd51a24de91 2024-12-06T08:21:06,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ed66a15171e04ce1a7696cd51a24de91, entries=150, sequenceid=16, filesize=11.7 K 2024-12-06T08:21:06,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 4407da04b0ac162131052d3fbc4bef2c in 955ms, sequenceid=16, compaction requested=false 2024-12-06T08:21:06,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:07,091 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-06T08:21:07,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
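The entries above show client writes to region 4407da04b0ac162131052d3fbc4bef2c being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the region is flushing, and a flush of stores A/B/C completing before the next flush procedure starts. The sketch below is illustrative only and is not part of this test run: it shows one way a client could issue the same kind of Put seen in the log (table TestAcidGuarantees, family A, qualifier col10, row test_row_0) and back off when the server reports the region is over its blocking memstore size. The retry count, sleep values, and class name are assumptions for illustration; the blocking threshold itself is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);   // may be rejected while the region is over its memstore limit
          break;            // write accepted
        } catch (IOException e) {
          // In this log the server-side cause is RegionTooBusyException; depending on
          // client retry settings it can surface directly or wrapped after retries.
          if (++attempts >= 10) {
            throw e;        // give up after a bounded number of attempts (illustrative)
          }
          Thread.sleep(100L * attempts);  // simple linear backoff while the flush drains
        }
      }
    }
  }
}

In practice the stock HBase client already retries RegionTooBusyException internally according to hbase.client.retries.number and hbase.client.pause; the explicit loop above only makes the backoff visible for the scenario recorded in this log.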
2024-12-06T08:21:07,091 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:21:07,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:07,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:07,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:07,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:07,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:07,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:07,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120653f854422e46466c8cda4b62720a8949_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473266061/Put/seqid=0 2024-12-06T08:21:07,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742379_1555 (size=12154) 2024-12-06T08:21:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T08:21:07,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:07,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473327179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473327181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473327182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473327182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473327182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473327283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473327285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473327285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473327285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473327286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473327484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473327487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473327488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473327488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473327488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:07,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:21:07,519 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120653f854422e46466c8cda4b62720a8949_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120653f854422e46466c8cda4b62720a8949_4407da04b0ac162131052d3fbc4bef2c
2024-12-06T08:21:07,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/a8342f95e78c474bb5c9317a4f69e964, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c]
2024-12-06T08:21:07,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/a8342f95e78c474bb5c9317a4f69e964 is 175, key is test_row_0/A:col10/1733473266061/Put/seqid=0
2024-12-06T08:21:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742380_1556 (size=30955)
2024-12-06T08:21:07,720 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-06T08:21:07,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:07,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473327788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:07,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473327789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473327790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:07,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473327790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:07,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:07,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473327791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:07,925 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/a8342f95e78c474bb5c9317a4f69e964
2024-12-06T08:21:07,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/41507be46e49451c9a31a01524e8e524 is 50, key is test_row_0/B:col10/1733473266061/Put/seqid=0
2024-12-06T08:21:07,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742381_1557 (size=12001)
2024-12-06T08:21:07,941 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/41507be46e49451c9a31a01524e8e524
2024-12-06T08:21:07,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2de6af495ee646ccaf47760f1b7fe041 is 50, key is test_row_0/C:col10/1733473266061/Put/seqid=0
2024-12-06T08:21:07,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742382_1558 (size=12001)
2024-12-06T08:21:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-12-06T08:21:08,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473328292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:08,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473328293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:08,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473328294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:08,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473328295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:08,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473328296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:08,363 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2de6af495ee646ccaf47760f1b7fe041
2024-12-06T08:21:08,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/a8342f95e78c474bb5c9317a4f69e964 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964
2024-12-06T08:21:08,371 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964, entries=150, sequenceid=41, filesize=30.2 K
2024-12-06T08:21:08,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/41507be46e49451c9a31a01524e8e524 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/41507be46e49451c9a31a01524e8e524
2024-12-06T08:21:08,376 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/41507be46e49451c9a31a01524e8e524, entries=150, sequenceid=41, filesize=11.7 K
2024-12-06T08:21:08,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2de6af495ee646ccaf47760f1b7fe041 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2de6af495ee646ccaf47760f1b7fe041
2024-12-06T08:21:08,380 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2de6af495ee646ccaf47760f1b7fe041, entries=150, sequenceid=41, filesize=11.7 K
2024-12-06T08:21:08,381 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 4407da04b0ac162131052d3fbc4bef2c in 1290ms, sequenceid=41, compaction requested=false
2024-12-06T08:21:08,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c:
2024-12-06T08:21:08,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.
2024-12-06T08:21:08,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164
2024-12-06T08:21:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=164
2024-12-06T08:21:08,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163
2024-12-06T08:21:08,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3600 sec
2024-12-06T08:21:08,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.3640 sec
2024-12-06T08:21:09,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c
2024-12-06T08:21:09,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-06T08:21:09,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A
2024-12-06T08:21:09,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:21:09,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B
2024-12-06T08:21:09,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:21:09,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C
2024-12-06T08:21:09,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:21:09,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c9fbb30599f04d4497d6ca6806127b73_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473267180/Put/seqid=0
2024-12-06T08:21:09,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742383_1559 (size=12154)
2024-12-06T08:21:09,311 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:21:09,314 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c9fbb30599f04d4497d6ca6806127b73_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c9fbb30599f04d4497d6ca6806127b73_4407da04b0ac162131052d3fbc4bef2c
2024-12-06T08:21:09,315 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/37b5637f9b8e4bb38257bb31fe035efd, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c]
2024-12-06T08:21:09,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/37b5637f9b8e4bb38257bb31fe035efd is 175, key is test_row_0/A:col10/1733473267180/Put/seqid=0
2024-12-06T08:21:09,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473329316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742384_1560 (size=30955) 2024-12-06T08:21:09,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473329316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473329317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473329318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473329318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473329420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473329420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473329420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473329421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473329421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473329623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473329624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473329624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473329624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473329624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,720 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/37b5637f9b8e4bb38257bb31fe035efd 2024-12-06T08:21:09,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/0e37f1b1220d4f0d86cfe2eceb2e0d65 is 50, key is test_row_0/B:col10/1733473267180/Put/seqid=0 2024-12-06T08:21:09,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742385_1561 (size=12001) 2024-12-06T08:21:09,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/0e37f1b1220d4f0d86cfe2eceb2e0d65 2024-12-06T08:21:09,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/b629c79d7ae8405baa35dd0c3ce2f692 is 50, key is test_row_0/C:col10/1733473267180/Put/seqid=0 2024-12-06T08:21:09,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742386_1562 (size=12001) 2024-12-06T08:21:09,739 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/b629c79d7ae8405baa35dd0c3ce2f692 2024-12-06T08:21:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/37b5637f9b8e4bb38257bb31fe035efd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd 2024-12-06T08:21:09,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd, entries=150, sequenceid=55, filesize=30.2 K 2024-12-06T08:21:09,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/0e37f1b1220d4f0d86cfe2eceb2e0d65 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0e37f1b1220d4f0d86cfe2eceb2e0d65 2024-12-06T08:21:09,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0e37f1b1220d4f0d86cfe2eceb2e0d65, entries=150, sequenceid=55, filesize=11.7 K 2024-12-06T08:21:09,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/b629c79d7ae8405baa35dd0c3ce2f692 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b629c79d7ae8405baa35dd0c3ce2f692 2024-12-06T08:21:09,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b629c79d7ae8405baa35dd0c3ce2f692, entries=150, sequenceid=55, filesize=11.7 K 2024-12-06T08:21:09,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4407da04b0ac162131052d3fbc4bef2c in 453ms, sequenceid=55, compaction requested=true 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
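The flush that completes above drains the region's memstore after a long run of "RegionTooBusyException: Over memstore limit=512.0 K" rejections. In HBase that blocking limit is the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the sketch below shows one way a configuration could arrive at a 512 KB limit like the one in this log. It is illustrative only: the specific values (128 KB flush size, multiplier 4) are assumptions chosen to reproduce the figure, not settings confirmed by this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: values assumed so that flush size * block multiplier = 512 KB,
// matching the "Over memstore limit=512.0 K" messages in the surrounding log.
public class MemstoreBlockingLimitExample {
    public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush each region at 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x flush size = 512 KB
        return conf;
    }
}
```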
2024-12-06T08:21:09,754 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:09,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:09,754 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:09,755 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:09,755 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:09,755 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:09,756 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=90.7 K 2024-12-06T08:21:09,756 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:09,756 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd] 2024-12-06T08:21:09,756 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:09,756 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:09,756 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:09,756 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ec8222fc8d8d4d3f8ff5a093a46f4e47, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/41507be46e49451c9a31a01524e8e524, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0e37f1b1220d4f0d86cfe2eceb2e0d65] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=35.2 K 2024-12-06T08:21:09,756 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab007ed019fd43898977a21bd1debc66, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473266033 2024-12-06T08:21:09,756 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ec8222fc8d8d4d3f8ff5a093a46f4e47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473266033 2024-12-06T08:21:09,757 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8342f95e78c474bb5c9317a4f69e964, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473266057 2024-12-06T08:21:09,757 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 41507be46e49451c9a31a01524e8e524, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473266057 2024-12-06T08:21:09,757 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37b5637f9b8e4bb38257bb31fe035efd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733473267180 2024-12-06T08:21:09,757 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 
0e37f1b1220d4f0d86cfe2eceb2e0d65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733473267180 2024-12-06T08:21:09,763 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:09,764 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#481 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:09,765 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/fb79320c97ce4333acfaf58abb5923fc is 50, key is test_row_0/B:col10/1733473267180/Put/seqid=0 2024-12-06T08:21:09,765 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412063e665d62e3214c60abc49cb2f2c32c8c_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:09,767 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412063e665d62e3214c60abc49cb2f2c32c8c_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:09,767 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063e665d62e3214c60abc49cb2f2c32c8c_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:09,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742388_1564 (size=4469) 2024-12-06T08:21:09,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742387_1563 (size=12104) 2024-12-06T08:21:09,787 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#480 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:09,789 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/45d62b2570b2467486294b0e29c77c53 is 175, key is test_row_0/A:col10/1733473267180/Put/seqid=0 2024-12-06T08:21:09,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742389_1565 (size=31058) 2024-12-06T08:21:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:09,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:21:09,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:09,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:09,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:09,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:09,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:09,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:09,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120600453ad2a932499b9e5a13b4a3187e49_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473269315/Put/seqid=0 2024-12-06T08:21:09,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742390_1566 (size=12154) 2024-12-06T08:21:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473329934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473329935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473329934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473329936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:09,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473329937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473330039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473330039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473330039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473330039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473330040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-06T08:21:10,127 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-06T08:21:10,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:10,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-06T08:21:10,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T08:21:10,130 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:10,130 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:10,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:10,191 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/fb79320c97ce4333acfaf58abb5923fc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fb79320c97ce4333acfaf58abb5923fc 2024-12-06T08:21:10,195 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into fb79320c97ce4333acfaf58abb5923fc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:21:10,195 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:10,195 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473269754; duration=0sec 2024-12-06T08:21:10,195 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:10,195 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:10,195 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:10,196 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:10,196 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:10,196 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
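A few entries above, the master stores a FlushTableProcedure (pid=165) for TestAcidGuarantees in response to a client flush request. The snippet below is a minimal, illustrative sketch of how such a flush can be requested through the standard Admin API; the surrounding connection handling is assumed and is not taken from this test's code.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Illustrative sketch: ask the master to flush the table, which schedules a
// flush procedure like the FlushTableProcedure entries seen in the log above.
public class FlushRequestExample {
    public static void flushTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```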
2024-12-06T08:21:10,196 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ed66a15171e04ce1a7696cd51a24de91, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2de6af495ee646ccaf47760f1b7fe041, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b629c79d7ae8405baa35dd0c3ce2f692] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=35.2 K 2024-12-06T08:21:10,197 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ed66a15171e04ce1a7696cd51a24de91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733473266033 2024-12-06T08:21:10,197 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/45d62b2570b2467486294b0e29c77c53 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/45d62b2570b2467486294b0e29c77c53 2024-12-06T08:21:10,197 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2de6af495ee646ccaf47760f1b7fe041, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733473266057 2024-12-06T08:21:10,198 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b629c79d7ae8405baa35dd0c3ce2f692, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733473267180 2024-12-06T08:21:10,201 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into 45d62b2570b2467486294b0e29c77c53(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
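The RegionTooBusyException entries that resume below show puts being rejected while the memstore sits above its blocking limit. The sketch below illustrates one way a caller might back off and retry such a write; it assumes the exception reaches the caller directly, whereas with default client retry settings it may instead surface wrapped in a retries-exhausted exception after the client's own retries. The row, family, and qualifier are taken from the log keys (test_row_0, A, col10) purely for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer sketch: back off and retry when the region reports it is too busy.
public class BusyRegionWriter {
    public static void putWithRetry(Connection conn, int maxAttempts) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking limit; wait for the flush to catch up before retrying.
                    Thread.sleep(100L * attempt);
                }
            }
            throw new IOException("Region still busy after " + maxAttempts + " attempts");
        }
    }
}
```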
2024-12-06T08:21:10,201 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:10,201 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473269754; duration=0sec 2024-12-06T08:21:10,201 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:10,201 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:10,207 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#C#compaction#483 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:10,207 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/820fe96ac1da45fd8ecb1ef997828832 is 50, key is test_row_0/C:col10/1733473267180/Put/seqid=0 2024-12-06T08:21:10,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742391_1567 (size=12104) 2024-12-06T08:21:10,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T08:21:10,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473330242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473330242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473330243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473330243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473330243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,282 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:10,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,339 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:10,342 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120600453ad2a932499b9e5a13b4a3187e49_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120600453ad2a932499b9e5a13b4a3187e49_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:10,343 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b0ed8592a671457ea0761dbe20a85288, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:10,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b0ed8592a671457ea0761dbe20a85288 is 175, key is test_row_0/A:col10/1733473269315/Put/seqid=0 2024-12-06T08:21:10,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742392_1568 (size=30955) 2024-12-06T08:21:10,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T08:21:10,434 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473330545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473330546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473330547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473330548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:10,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473330549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,587 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:10,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:10,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,623 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/820fe96ac1da45fd8ecb1ef997828832 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/820fe96ac1da45fd8ecb1ef997828832 2024-12-06T08:21:10,627 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into 820fe96ac1da45fd8ecb1ef997828832(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:10,627 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:10,627 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473269754; duration=0sec 2024-12-06T08:21:10,627 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:10,627 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T08:21:10,740 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:10,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,748 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b0ed8592a671457ea0761dbe20a85288 2024-12-06T08:21:10,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/43ba1b94963e4925b5483436b7c53f6e is 50, key is test_row_0/B:col10/1733473269315/Put/seqid=0 2024-12-06T08:21:10,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742393_1569 (size=12001) 2024-12-06T08:21:10,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/43ba1b94963e4925b5483436b7c53f6e 2024-12-06T08:21:10,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/1eb445a634434dc9b80eaa431c7d23ee is 50, key is test_row_0/C:col10/1733473269315/Put/seqid=0 2024-12-06T08:21:10,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742394_1570 (size=12001) 2024-12-06T08:21:10,893 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:10,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:10,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:11,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:11,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:11,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:11,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:11,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:11,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:11,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:11,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473331048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:11,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473331049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:11,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473331051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:11,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473331053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:11,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473331054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/1eb445a634434dc9b80eaa431c7d23ee 2024-12-06T08:21:11,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b0ed8592a671457ea0761dbe20a85288 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288 2024-12-06T08:21:11,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288, entries=150, sequenceid=78, filesize=30.2 K 2024-12-06T08:21:11,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/43ba1b94963e4925b5483436b7c53f6e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/43ba1b94963e4925b5483436b7c53f6e 2024-12-06T08:21:11,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/43ba1b94963e4925b5483436b7c53f6e, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T08:21:11,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/1eb445a634434dc9b80eaa431c7d23ee as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/1eb445a634434dc9b80eaa431c7d23ee 2024-12-06T08:21:11,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/1eb445a634434dc9b80eaa431c7d23ee, entries=150, sequenceid=78, filesize=11.7 K 2024-12-06T08:21:11,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4407da04b0ac162131052d3fbc4bef2c in 1259ms, sequenceid=78, compaction requested=false 2024-12-06T08:21:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:11,199 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:11,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-06T08:21:11,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:11,200 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:21:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:11,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065bf163a363bc43e2bc64c07c8ab5ee69_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473269936/Put/seqid=0 2024-12-06T08:21:11,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742395_1571 (size=12154) 2024-12-06T08:21:11,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T08:21:11,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:11,628 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065bf163a363bc43e2bc64c07c8ab5ee69_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065bf163a363bc43e2bc64c07c8ab5ee69_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:11,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e35a2d5f235245b59b84c84c1e45c947, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e35a2d5f235245b59b84c84c1e45c947 is 175, key is test_row_0/A:col10/1733473269936/Put/seqid=0 2024-12-06T08:21:11,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742396_1572 (size=30955) 2024-12-06T08:21:12,044 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e35a2d5f235245b59b84c84c1e45c947 2024-12-06T08:21:12,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/acbf38b8f85f41b0913855bf8258a9de is 50, key is test_row_0/B:col10/1733473269936/Put/seqid=0 2024-12-06T08:21:12,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742397_1573 (size=12001) 2024-12-06T08:21:12,056 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/acbf38b8f85f41b0913855bf8258a9de 2024-12-06T08:21:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
as already flushing 2024-12-06T08:21:12,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/c57492566d7b48dc80a2b8942804e809 is 50, key is test_row_0/C:col10/1733473269936/Put/seqid=0 2024-12-06T08:21:12,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742398_1574 (size=12001) 2024-12-06T08:21:12,067 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/c57492566d7b48dc80a2b8942804e809 2024-12-06T08:21:12,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e35a2d5f235245b59b84c84c1e45c947 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947 2024-12-06T08:21:12,075 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947, entries=150, sequenceid=94, filesize=30.2 K 2024-12-06T08:21:12,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/acbf38b8f85f41b0913855bf8258a9de as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/acbf38b8f85f41b0913855bf8258a9de 2024-12-06T08:21:12,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473332073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473332074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473332075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473332076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,079 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/acbf38b8f85f41b0913855bf8258a9de, entries=150, sequenceid=94, filesize=11.7 K 2024-12-06T08:21:12,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473332076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/c57492566d7b48dc80a2b8942804e809 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/c57492566d7b48dc80a2b8942804e809 2024-12-06T08:21:12,083 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/c57492566d7b48dc80a2b8942804e809, entries=150, sequenceid=94, filesize=11.7 K 2024-12-06T08:21:12,084 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 4407da04b0ac162131052d3fbc4bef2c in 884ms, sequenceid=94, compaction requested=true 2024-12-06T08:21:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status 
journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-06T08:21:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-06T08:21:12,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-06T08:21:12,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9550 sec 2024-12-06T08:21:12,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.9590 sec 2024-12-06T08:21:12,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:12,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:21:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:12,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206cf4d8e0451d84c8083061cb6cbd0627d_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:12,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473332184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473332185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473332186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742399_1575 (size=12154) 2024-12-06T08:21:12,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473332187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473332188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-06T08:21:12,233 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-06T08:21:12,234 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-06T08:21:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T08:21:12,236 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:12,236 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:12,236 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:12,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473332289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473332289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473332290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473332291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473332292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T08:21:12,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T08:21:12,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:12,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473332492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473332493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473332494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473332495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473332495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T08:21:12,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T08:21:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,591 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:12,594 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206cf4d8e0451d84c8083061cb6cbd0627d_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206cf4d8e0451d84c8083061cb6cbd0627d_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:12,595 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c2b9716f8fe94eb4962a97b9dfa853e6, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:12,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c2b9716f8fe94eb4962a97b9dfa853e6 is 175, key is test_row_0/A:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:12,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742400_1576 (size=30955) 2024-12-06T08:21:12,600 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=49.2 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c2b9716f8fe94eb4962a97b9dfa853e6 2024-12-06T08:21:12,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/987b0d0448944fb997037b66a7e6891a is 50, key is test_row_0/B:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:12,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742401_1577 (size=12001) 2024-12-06T08:21:12,693 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T08:21:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473332794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473332796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473332798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473332799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473332799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T08:21:12,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:12,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T08:21:12,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:12,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:12,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:12,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:13,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T08:21:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:13,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:13,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:13,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:13,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/987b0d0448944fb997037b66a7e6891a 2024-12-06T08:21:13,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/0b9602b5b43341d9a436fdb32868fb7a is 50, key is test_row_0/C:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:13,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742402_1578 (size=12001) 2024-12-06T08:21:13,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/0b9602b5b43341d9a436fdb32868fb7a 2024-12-06T08:21:13,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c2b9716f8fe94eb4962a97b9dfa853e6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6 2024-12-06T08:21:13,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6, entries=150, sequenceid=119, filesize=30.2 K 2024-12-06T08:21:13,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/987b0d0448944fb997037b66a7e6891a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/987b0d0448944fb997037b66a7e6891a 2024-12-06T08:21:13,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/987b0d0448944fb997037b66a7e6891a, entries=150, sequenceid=119, filesize=11.7 K 2024-12-06T08:21:13,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/0b9602b5b43341d9a436fdb32868fb7a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b9602b5b43341d9a436fdb32868fb7a 2024-12-06T08:21:13,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b9602b5b43341d9a436fdb32868fb7a, entries=150, sequenceid=119, filesize=11.7 K 2024-12-06T08:21:13,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 4407da04b0ac162131052d3fbc4bef2c in 863ms, sequenceid=119, compaction requested=true 2024-12-06T08:21:13,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:13,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:13,042 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:21:13,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:13,042 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:21:13,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:13,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:13,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:13,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:13,043 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:21:13,044 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:21:13,044 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:13,044 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:13,044 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:13,044 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:13,044 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/45d62b2570b2467486294b0e29c77c53, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=121.0 K 2024-12-06T08:21:13,044 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fb79320c97ce4333acfaf58abb5923fc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/43ba1b94963e4925b5483436b7c53f6e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/acbf38b8f85f41b0913855bf8258a9de, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/987b0d0448944fb997037b66a7e6891a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=47.0 K 2024-12-06T08:21:13,044 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:13,044 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/45d62b2570b2467486294b0e29c77c53, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6] 2024-12-06T08:21:13,044 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fb79320c97ce4333acfaf58abb5923fc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733473267180 2024-12-06T08:21:13,045 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45d62b2570b2467486294b0e29c77c53, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733473267180 2024-12-06T08:21:13,045 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 43ba1b94963e4925b5483436b7c53f6e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473269315 2024-12-06T08:21:13,045 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0ed8592a671457ea0761dbe20a85288, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473269315 2024-12-06T08:21:13,045 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting acbf38b8f85f41b0913855bf8258a9de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733473269930 2024-12-06T08:21:13,045 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e35a2d5f235245b59b84c84c1e45c947, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733473269930 2024-12-06T08:21:13,045 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 987b0d0448944fb997037b66a7e6891a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473272069 2024-12-06T08:21:13,046 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2b9716f8fe94eb4962a97b9dfa853e6, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473272069 2024-12-06T08:21:13,064 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#492 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:13,065 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/483a7ce99114435abbcda92b4986f34c is 50, key is test_row_0/B:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:13,072 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:13,076 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206574035d3184e4e9a93cdc97855d0bbf8_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:13,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742403_1579 (size=12241) 2024-12-06T08:21:13,079 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206574035d3184e4e9a93cdc97855d0bbf8_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:13,079 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206574035d3184e4e9a93cdc97855d0bbf8_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:13,085 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/483a7ce99114435abbcda92b4986f34c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/483a7ce99114435abbcda92b4986f34c 2024-12-06T08:21:13,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742404_1580 (size=4469) 2024-12-06T08:21:13,091 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into 483a7ce99114435abbcda92b4986f34c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:13,091 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:13,091 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=12, startTime=1733473273042; duration=0sec 2024-12-06T08:21:13,092 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:13,092 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:13,092 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T08:21:13,093 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-06T08:21:13,093 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:13,093 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:13,093 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/820fe96ac1da45fd8ecb1ef997828832, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/1eb445a634434dc9b80eaa431c7d23ee, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/c57492566d7b48dc80a2b8942804e809, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b9602b5b43341d9a436fdb32868fb7a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=47.0 K 2024-12-06T08:21:13,093 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 820fe96ac1da45fd8ecb1ef997828832, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733473267180 2024-12-06T08:21:13,094 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 1eb445a634434dc9b80eaa431c7d23ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733473269315 2024-12-06T08:21:13,094 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c57492566d7b48dc80a2b8942804e809, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=94, earliestPutTs=1733473269930 2024-12-06T08:21:13,094 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b9602b5b43341d9a436fdb32868fb7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473272069 2024-12-06T08:21:13,103 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#C#compaction#494 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:13,104 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/4a79e27ab8d946349ba702acfc0733af is 50, key is test_row_0/C:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:13,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742405_1581 (size=12241) 2024-12-06T08:21:13,153 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-06T08:21:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:13,154 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T08:21:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:13,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:13,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:13,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120645f7468886824ca2a2904d36182a4d5e_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473272186/Put/seqid=0 2024-12-06T08:21:13,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742406_1582 (size=12254) 2024-12-06T08:21:13,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:13,166 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120645f7468886824ca2a2904d36182a4d5e_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120645f7468886824ca2a2904d36182a4d5e_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:13,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b62f90107dd24591900de92ad95d8054, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:13,167 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b62f90107dd24591900de92ad95d8054 is 175, key is test_row_0/A:col10/1733473272186/Put/seqid=0 2024-12-06T08:21:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742407_1583 (size=31055) 2024-12-06T08:21:13,173 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b62f90107dd24591900de92ad95d8054 2024-12-06T08:21:13,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/bbf25487ca454d2089973d621d7332b9 is 50, key is test_row_0/B:col10/1733473272186/Put/seqid=0 2024-12-06T08:21:13,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742408_1584 (size=12101) 2024-12-06T08:21:13,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:13,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473333316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473333316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473333316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473333318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473333320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T08:21:13,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473333421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473333421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473333421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473333421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473333423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,492 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#493 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:13,493 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/4b0c5061967b45ba9d8c9edd59e4714b is 175, key is test_row_0/A:col10/1733473272069/Put/seqid=0 2024-12-06T08:21:13,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742409_1585 (size=31195) 2024-12-06T08:21:13,527 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/4a79e27ab8d946349ba702acfc0733af as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/4a79e27ab8d946349ba702acfc0733af 2024-12-06T08:21:13,531 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into 4a79e27ab8d946349ba702acfc0733af(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:13,531 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:13,532 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=12, startTime=1733473273043; duration=0sec 2024-12-06T08:21:13,532 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:13,532 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:13,597 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/bbf25487ca454d2089973d621d7332b9 2024-12-06T08:21:13,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/46fc128ee2d14b2486c07754e5a0d05a is 50, key is test_row_0/C:col10/1733473272186/Put/seqid=0 2024-12-06T08:21:13,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742410_1586 (size=12101) 2024-12-06T08:21:13,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473333624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473333624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473333625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473333626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473333627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,902 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/4b0c5061967b45ba9d8c9edd59e4714b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/4b0c5061967b45ba9d8c9edd59e4714b 2024-12-06T08:21:13,906 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into 4b0c5061967b45ba9d8c9edd59e4714b(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:13,906 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:13,906 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=12, startTime=1733473273042; duration=0sec 2024-12-06T08:21:13,907 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:13,907 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:13,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473333926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473333928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473333928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473333929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:13,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473333931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,008 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/46fc128ee2d14b2486c07754e5a0d05a 2024-12-06T08:21:14,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b62f90107dd24591900de92ad95d8054 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054 2024-12-06T08:21:14,016 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054, entries=150, sequenceid=132, filesize=30.3 K 2024-12-06T08:21:14,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/bbf25487ca454d2089973d621d7332b9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/bbf25487ca454d2089973d621d7332b9 2024-12-06T08:21:14,021 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/bbf25487ca454d2089973d621d7332b9, entries=150, sequenceid=132, filesize=11.8 K 2024-12-06T08:21:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/46fc128ee2d14b2486c07754e5a0d05a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/46fc128ee2d14b2486c07754e5a0d05a 2024-12-06T08:21:14,025 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/46fc128ee2d14b2486c07754e5a0d05a, entries=150, sequenceid=132, filesize=11.8 K 2024-12-06T08:21:14,026 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4407da04b0ac162131052d3fbc4bef2c in 872ms, sequenceid=132, compaction requested=false 2024-12-06T08:21:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-06T08:21:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-06T08:21:14,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-06T08:21:14,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7910 sec 2024-12-06T08:21:14,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.7950 sec 2024-12-06T08:21:14,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-06T08:21:14,339 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-06T08:21:14,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-12-06T08:21:14,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T08:21:14,342 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:14,343 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:14,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:14,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:14,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:21:14,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:14,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:14,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:14,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:14,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:14,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:14,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120643c7e09b753c4631953bd75f782d6350_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:14,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473334437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473334437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473334438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473334440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473334440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T08:21:14,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742411_1587 (size=12304) 2024-12-06T08:21:14,494 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
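The 512.0 K figure in the "Over memstore limit" messages is the region's blocking memstore size, which HBase derives as the memstore flush size multiplied by the block multiplier; this test clearly runs with a very small flush size so that writers hit the blocking limit quickly. A rough sketch of how such a limit could be configured; the specific numbers are an assumption chosen only so the product comes out to 512 K, not values read from the test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    // 128 K * 4 = 512 K, matching the limit reported by checkResources() in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size: " + blockingLimit + " bytes"); // 524288
  }
}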
2024-12-06T08:21:14,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473334541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473334541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473334543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473334543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T08:21:14,647 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
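pid=169 and pid=170 above are a FlushTableProcedure and its FlushRegionProcedure subprocedure, started on the master after the client (jenkins) requested a flush of TestAcidGuarantees. The region server answers each dispatch with "NOT flushing ... as already flushing" and fails the callable with "Unable to complete flush", because MemStoreFlusher.0 is still flushing the same region; the log shows the master simply re-dispatching pid=170 until it can run. A minimal sketch of the client-side call that kicks this off, assuming the same connection setup as the earlier sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's FlushTableProcedure (and its per-region
      // FlushRegionProcedure subprocedures) report success, which the client log
      // records as "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}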
2024-12-06T08:21:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473334743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473334744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473334746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:14,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473334746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:14,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,844 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:14,847 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120643c7e09b753c4631953bd75f782d6350_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120643c7e09b753c4631953bd75f782d6350_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:14,848 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b28b6eadb21746e39db127ef40d5cb4e, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:14,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b28b6eadb21746e39db127ef40d5cb4e is 175, key is test_row_0/A:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:14,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742412_1588 (size=31105) 2024-12-06T08:21:14,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T08:21:14,952 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:14,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
as already flushing 2024-12-06T08:21:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:14,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:14,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473335047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473335048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473335049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473335050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
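The "Over memstore limit=512.0 K" rejections above are HRegion.checkResources blocking new writes because the region's memstore has grown past its blocking size while a flush is still in progress. A minimal configuration sketch of how such a small blocking limit can arise, assuming it comes from the standard flush-size and block-multiplier settings (the property names are real HBase keys; the specific values are an assumption inferred from the 512.0 K figure in the log, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyMemstoreLimit {
        // Hypothetical settings that would reproduce a 512 K blocking limit.
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a memstore once it reaches ~128 KB (production default: 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Block new writes once the memstore exceeds flush.size * multiplier
            // (128 KB * 4 = 512 KB); at that point HRegion.checkResources throws
            // RegionTooBusyException("Over memstore limit=512.0 K ..."), as above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }

Until the in-flight flush frees memstore space, every Mutate call against region 4407da04b0ac162131052d3fbc4bef2c is answered with this exception, which is why the same client connections (172.17.0.2:37572, :37578, :37604, :37612) keep reappearing with later deadlines and increasing callIds.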
2024-12-06T08:21:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,253 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b28b6eadb21746e39db127ef40d5cb4e 2024-12-06T08:21:15,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:15,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:15,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
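The cycle that keeps repeating above (Executing remote procedure ... pid=170, NOT flushing ... as already flushing, Unable to complete flush, Remote procedure failed) is the master-driven flush racing with the MemStoreFlusher: the region server rejects the master-requested flush because the region is already being flushed, the failure is reported back, and the master re-dispatches the same pid until an attempt lands after the ongoing flush finishes. A minimal sketch of how such a flush is requested through the public client API, assuming an ordinary Connection to this cluster; this is an illustration only, not the code TestAcidGuarantees actually runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
        // Asks the master to flush the table; a master-driven flush is what
        // produces the FlushRegionCallable dispatches (pid=170) seen above.
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }

The recurring "Checking to see if procedure is done pid=169" entries are a client polling the master for completion of the parent flush procedure while its per-region child (pid=170) is still being retried.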
2024-12-06T08:21:15,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/8fafcd8f0f8e40f1b05f45289b04c1bd is 50, key is test_row_0/B:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:15,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742413_1589 (size=12151) 2024-12-06T08:21:15,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/8fafcd8f0f8e40f1b05f45289b04c1bd 2024-12-06T08:21:15,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/dd10a9f2d0df4b0eacc2de9d58a84411 is 50, key is test_row_0/C:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:15,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742414_1590 (size=12151) 2024-12-06T08:21:15,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:15,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:15,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-06T08:21:15,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473335449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473335551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473335551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473335551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473335552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:15,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:15,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:15,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:15,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/dd10a9f2d0df4b0eacc2de9d58a84411 2024-12-06T08:21:15,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/b28b6eadb21746e39db127ef40d5cb4e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e 2024-12-06T08:21:15,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e, entries=150, sequenceid=159, filesize=30.4 K 2024-12-06T08:21:15,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/8fafcd8f0f8e40f1b05f45289b04c1bd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/8fafcd8f0f8e40f1b05f45289b04c1bd 2024-12-06T08:21:15,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/8fafcd8f0f8e40f1b05f45289b04c1bd, entries=150, sequenceid=159, filesize=11.9 K 2024-12-06T08:21:15,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/dd10a9f2d0df4b0eacc2de9d58a84411 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/dd10a9f2d0df4b0eacc2de9d58a84411 2024-12-06T08:21:15,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/dd10a9f2d0df4b0eacc2de9d58a84411, entries=150, sequenceid=159, filesize=11.9 K 2024-12-06T08:21:15,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 4407da04b0ac162131052d3fbc4bef2c in 1263ms, sequenceid=159, compaction requested=true 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:15,693 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:15,693 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:15,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:15,694 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:15,694 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:15,694 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:15,695 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/483a7ce99114435abbcda92b4986f34c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/bbf25487ca454d2089973d621d7332b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/8fafcd8f0f8e40f1b05f45289b04c1bd] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=35.6 K 2024-12-06T08:21:15,695 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:15,695 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:15,695 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,695 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/4b0c5061967b45ba9d8c9edd59e4714b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=91.2 K 2024-12-06T08:21:15,695 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,695 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/4b0c5061967b45ba9d8c9edd59e4714b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e] 2024-12-06T08:21:15,695 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 483a7ce99114435abbcda92b4986f34c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473272069 2024-12-06T08:21:15,695 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b0c5061967b45ba9d8c9edd59e4714b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473272069 2024-12-06T08:21:15,696 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b62f90107dd24591900de92ad95d8054, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733473272185 2024-12-06T08:21:15,696 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting bbf25487ca454d2089973d621d7332b9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733473272185 2024-12-06T08:21:15,696 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fafcd8f0f8e40f1b05f45289b04c1bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473273314 2024-12-06T08:21:15,696 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting b28b6eadb21746e39db127ef40d5cb4e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473273314 2024-12-06T08:21:15,702 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:15,703 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#501 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:15,704 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/51dffdebc3d04a5d92112d1659a04526 is 50, key is test_row_0/B:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:15,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:15,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-06T08:21:15,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,716 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-06T08:21:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:15,718 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412065e0f69a58db0444fa06221ba844d5602_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:15,719 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412065e0f69a58db0444fa06221ba844d5602_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:15,720 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412065e0f69a58db0444fa06221ba844d5602_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:15,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742415_1591 (size=12493) 2024-12-06T08:21:15,749 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/51dffdebc3d04a5d92112d1659a04526 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/51dffdebc3d04a5d92112d1659a04526 2024-12-06T08:21:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120675aba228b1ee426598ce0f3a2b0b6485_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473274438/Put/seqid=0 2024-12-06T08:21:15,755 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into 51dffdebc3d04a5d92112d1659a04526(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:15,755 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:15,755 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473275693; duration=0sec 2024-12-06T08:21:15,755 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:15,755 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:15,756 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:15,756 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:15,757 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:15,757 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:15,757 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/4a79e27ab8d946349ba702acfc0733af, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/46fc128ee2d14b2486c07754e5a0d05a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/dd10a9f2d0df4b0eacc2de9d58a84411] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=35.6 K 2024-12-06T08:21:15,757 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a79e27ab8d946349ba702acfc0733af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733473272069 2024-12-06T08:21:15,758 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 46fc128ee2d14b2486c07754e5a0d05a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733473272185 2024-12-06T08:21:15,758 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting dd10a9f2d0df4b0eacc2de9d58a84411, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473273314 2024-12-06T08:21:15,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 
is added to blk_1073742416_1592 (size=4469) 2024-12-06T08:21:15,777 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#502 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:15,778 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e073010fde8343d297cd1bbec93f5845 is 175, key is test_row_0/A:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:15,786 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#C#compaction#504 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:15,786 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/e41f0c8a8ae54608b77cc2209fd1fe92 is 50, key is test_row_0/C:col10/1733473273314/Put/seqid=0 2024-12-06T08:21:15,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742417_1593 (size=12304) 2024-12-06T08:21:15,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742418_1594 (size=31447) 2024-12-06T08:21:15,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742419_1595 (size=12493) 2024-12-06T08:21:15,813 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e073010fde8343d297cd1bbec93f5845 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e073010fde8343d297cd1bbec93f5845 2024-12-06T08:21:15,821 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/e41f0c8a8ae54608b77cc2209fd1fe92 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/e41f0c8a8ae54608b77cc2209fd1fe92 2024-12-06T08:21:15,823 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into e073010fde8343d297cd1bbec93f5845(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:15,823 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:15,823 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473275693; duration=0sec 2024-12-06T08:21:15,823 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:15,823 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:15,826 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into e41f0c8a8ae54608b77cc2209fd1fe92(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:21:15,826 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:15,826 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473275693; duration=0sec 2024-12-06T08:21:15,826 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:15,826 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:16,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,206 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120675aba228b1ee426598ce0f3a2b0b6485_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120675aba228b1ee426598ce0f3a2b0b6485_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c55fc02dbe324ef18f8f00bfbbd573e5, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:16,208 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c55fc02dbe324ef18f8f00bfbbd573e5 is 175, key is test_row_0/A:col10/1733473274438/Put/seqid=0 2024-12-06T08:21:16,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742420_1596 (size=31105) 2024-12-06T08:21:16,212 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c55fc02dbe324ef18f8f00bfbbd573e5 2024-12-06T08:21:16,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/0c23769881d74cdebe9286d4af6a96e5 is 50, key is test_row_0/B:col10/1733473274438/Put/seqid=0 2024-12-06T08:21:16,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742421_1597 (size=12151) 2024-12-06T08:21:16,228 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/0c23769881d74cdebe9286d4af6a96e5 2024-12-06T08:21:16,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/3b556d263ca84035b946964e7458209e is 50, key is test_row_0/C:col10/1733473274438/Put/seqid=0 2024-12-06T08:21:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742422_1598 (size=12151) 2024-12-06T08:21:16,248 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/3b556d263ca84035b946964e7458209e 2024-12-06T08:21:16,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/c55fc02dbe324ef18f8f00bfbbd573e5 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5 2024-12-06T08:21:16,255 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5, entries=150, sequenceid=170, filesize=30.4 K 2024-12-06T08:21:16,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/0c23769881d74cdebe9286d4af6a96e5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0c23769881d74cdebe9286d4af6a96e5 2024-12-06T08:21:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,259 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0c23769881d74cdebe9286d4af6a96e5, entries=150, sequenceid=170, filesize=11.9 K 2024-12-06T08:21:16,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/3b556d263ca84035b946964e7458209e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/3b556d263ca84035b946964e7458209e 2024-12-06T08:21:16,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,275 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/3b556d263ca84035b946964e7458209e, entries=150, sequenceid=170, filesize=11.9 K 2024-12-06T08:21:16,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,276 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 4407da04b0ac162131052d3fbc4bef2c in 561ms, sequenceid=170, compaction requested=false 2024-12-06T08:21:16,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:16,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:16,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-06T08:21:16,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-06T08:21:16,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-06T08:21:16,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9340 sec 2024-12-06T08:21:16,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,279 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.9380 sec 2024-12-06T08:21:16,340 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,343 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,346 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,351 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,354 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,356 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,359 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,361 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,363 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,365 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,367 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,371 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,374 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,379 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,382 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,387 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,389 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,391 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,396 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,399 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,405 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,407 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,410 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,412 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,415 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
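The run of identical DEBUG entries in this stretch of the log comes from StoreFileTrackerFactory resolving a store file tracker for every store it touches; with nothing else configured it falls back to DefaultStoreFileTracker, the class named in the message. The following is a minimal, illustrative Java sketch of pinning that choice explicitly; the property name hbase.store.file-tracker.impl is an assumption about the store-file-tracking configuration and is not taken from this log.

// Sketch only: selecting a store file tracker via configuration.
// ASSUMPTION: "hbase.store.file-tracker.impl" is the relevant property;
// verify against the HBase release in use before relying on it.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TrackerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "DEFAULT" corresponds to DefaultStoreFileTracker, the impl logged above.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println(conf.get("hbase.store.file-tracker.impl"));
  }
}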
2024-12-06T08:21:16,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-12-06T08:21:16,447 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed
2024-12-06T08:21:16,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-06T08:21:16,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees
2024-12-06T08:21:16,450 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-06T08:21:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171
2024-12-06T08:21:16,451 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-06T08:21:16,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
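The entries above trace one synchronous table flush end to end: the previous flush (procId 169) is reported complete to the client, HMaster then accepts a new flush of TestAcidGuarantees, ProcedureExecutor stores FlushTableProcedure pid=171, which moves from FLUSH_TABLE_PREPARE to FLUSH_TABLE_FLUSH_REGIONS and initializes FlushRegionProcedure pid=172 as a subprocedure, while the caller keeps polling "Checking to see if procedure is done". Below is a minimal client-side sketch of the call that produces this pattern; only Admin#flush and the table name come from the log, the connection setup is assumed.

// Sketch only: synchronous flush of the test table from a client.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this into a FlushTableProcedure with one
      // FlushRegionProcedure per region; the client blocks until the
      // procedure is reported done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}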
2024-12-06T08:21:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:16,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:21:16,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-06T08:21:16,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A
2024-12-06T08:21:16,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:21:16,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B
2024-12-06T08:21:16,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:21:16,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C
2024-12-06T08:21:16,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-06T08:21:16,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c
2024-12-06T08:21:16,602 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442
2024-12-06T08:21:16,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172
2024-12-06T08:21:16,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.
2024-12-06T08:21:16,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing
2024-12-06T08:21:16,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.
2024-12-06T08:21:16,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172
java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:21:16,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172
java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:21:16,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=172
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T08:21:16,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d439b57ee9fd4020894362c87e637698_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473276594/Put/seqid=0
2024-12-06T08:21:16,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742423_1599 (size=14794)
2024-12-06T08:21:16,635 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T08:21:16,640 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206d439b57ee9fd4020894362c87e637698_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d439b57ee9fd4020894362c87e637698_4407da04b0ac162131052d3fbc4bef2c
2024-12-06T08:21:16,642 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e9d72625beeb4e21ae05d0b589e6208e, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c]
2024-12-06T08:21:16,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:16,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:16,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473336637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:16,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473336637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:16,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e9d72625beeb4e21ae05d0b589e6208e is 175, key is test_row_0/A:col10/1733473276594/Put/seqid=0
2024-12-06T08:21:16,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:16,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473336639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:16,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:21:16,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473336639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
2024-12-06T08:21:16,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742424_1600 (size=39749)
2024-12-06T08:21:16,672 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=185, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e9d72625beeb4e21ae05d0b589e6208e
2024-12-06T08:21:16,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/6e3f299b3f4c4eaf907599be0e7d5c52 is 50, key is test_row_0/B:col10/1733473276594/Put/seqid=0
2024-12-06T08:21:16,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742425_1601 (size=12151)
2024-12-06T08:21:16,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/6e3f299b3f4c4eaf907599be0e7d5c52
2024-12-06T08:21:16,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2908b69a03d7457f9a69850c01b6d9c2 is 50, key is test_row_0/C:col10/1733473276594/Put/seqid=0
2024-12-06T08:21:16,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742426_1602 (size=12151)
2024-12-06T08:21:16,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473336743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473336743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473336744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473336745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T08:21:16,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T08:21:16,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:16,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:16,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:16,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:16,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:16,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:16,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T08:21:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:16,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:16,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:16,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473336947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473336947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:16,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473336948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:16,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473336948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T08:21:17,060 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T08:21:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:17,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:17,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:17,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2908b69a03d7457f9a69850c01b6d9c2 2024-12-06T08:21:17,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/e9d72625beeb4e21ae05d0b589e6208e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e 2024-12-06T08:21:17,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e, entries=200, sequenceid=185, filesize=38.8 K 2024-12-06T08:21:17,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/6e3f299b3f4c4eaf907599be0e7d5c52 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/6e3f299b3f4c4eaf907599be0e7d5c52 2024-12-06T08:21:17,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/6e3f299b3f4c4eaf907599be0e7d5c52, entries=150, sequenceid=185, filesize=11.9 K 2024-12-06T08:21:17,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2908b69a03d7457f9a69850c01b6d9c2 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2908b69a03d7457f9a69850c01b6d9c2 2024-12-06T08:21:17,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2908b69a03d7457f9a69850c01b6d9c2, entries=150, sequenceid=185, filesize=11.9 K 2024-12-06T08:21:17,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 4407da04b0ac162131052d3fbc4bef2c in 522ms, sequenceid=185, compaction requested=true 2024-12-06T08:21:17,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:17,120 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:17,120 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:17,121 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:17,121 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:17,121 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/51dffdebc3d04a5d92112d1659a04526, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0c23769881d74cdebe9286d4af6a96e5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/6e3f299b3f4c4eaf907599be0e7d5c52] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=35.9 K 2024-12-06T08:21:17,121 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e073010fde8343d297cd1bbec93f5845, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=99.9 K 2024-12-06T08:21:17,121 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e073010fde8343d297cd1bbec93f5845, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e] 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 51dffdebc3d04a5d92112d1659a04526, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473273314 2024-12-06T08:21:17,121 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e073010fde8343d297cd1bbec93f5845, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473273314 2024-12-06T08:21:17,122 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting c55fc02dbe324ef18f8f00bfbbd573e5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473274436 2024-12-06T08:21:17,122 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c23769881d74cdebe9286d4af6a96e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473274436 2024-12-06T08:21:17,122 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e3f299b3f4c4eaf907599be0e7d5c52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733473276594 2024-12-06T08:21:17,122 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9d72625beeb4e21ae05d0b589e6208e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733473276588 2024-12-06T08:21:17,153 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:17,154 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#510 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:17,154 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ffafc622a8f24b3990ed739e43bc394d is 50, key is test_row_0/B:col10/1733473276594/Put/seqid=0 2024-12-06T08:21:17,157 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206ced9ea39ac8b4cbc9f26fcda60e34d23_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:17,159 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206ced9ea39ac8b4cbc9f26fcda60e34d23_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:17,159 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206ced9ea39ac8b4cbc9f26fcda60e34d23_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:17,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742427_1603 (size=12595) 2024-12-06T08:21:17,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742428_1604 (size=4469) 2024-12-06T08:21:17,191 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#511 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:17,192 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/8929ca1b50ee407db80b7a94f1e16bae is 175, key is test_row_0/A:col10/1733473276594/Put/seqid=0 2024-12-06T08:21:17,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742429_1605 (size=31549) 2024-12-06T08:21:17,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:17,214 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:17,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b050d0c2e9b543e7935cf97f4209ad7e_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473276635/Put/seqid=0 2024-12-06T08:21:17,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742430_1606 (size=12304) 2024-12-06T08:21:17,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:17,232 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206b050d0c2e9b543e7935cf97f4209ad7e_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b050d0c2e9b543e7935cf97f4209ad7e_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:17,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/0b33bed025074e16ba1eeeeb7752348b, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:17,234 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/0b33bed025074e16ba1eeeeb7752348b is 175, key is test_row_0/A:col10/1733473276635/Put/seqid=0 2024-12-06T08:21:17,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742431_1607 (size=31105) 2024-12-06T08:21:17,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:17,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:17,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473337259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473337260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473337260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473337266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473337363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473337363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473337364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473337454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,456 DEBUG [Thread-2437 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:21:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T08:21:17,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473337565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473337566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473337566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,575 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ffafc622a8f24b3990ed739e43bc394d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ffafc622a8f24b3990ed739e43bc394d 2024-12-06T08:21:17,579 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into ffafc622a8f24b3990ed739e43bc394d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
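
The RegionTooBusyException warnings above are raised by HRegion.checkResources when the region's memstore exceeds its blocking limit, reported here as 512.0 K; that limit is generally the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. A minimal configuration sketch with illustrative values (not the ones this test actually uses) that would yield the same 512 KB limit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static Configuration blockingLimitConf() {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit ~= flush size * block multiplier; with these illustrative
        // values a region starts rejecting writes once its memstore exceeds
        // 128 KB * 4 = 512 KB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }
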
2024-12-06T08:21:17,579 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:17,579 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473277120; duration=0sec 2024-12-06T08:21:17,579 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:17,579 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:17,579 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:17,580 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:17,580 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:17,580 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:17,580 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/e41f0c8a8ae54608b77cc2209fd1fe92, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/3b556d263ca84035b946964e7458209e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2908b69a03d7457f9a69850c01b6d9c2] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=35.9 K 2024-12-06T08:21:17,580 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e41f0c8a8ae54608b77cc2209fd1fe92, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473273314 2024-12-06T08:21:17,581 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b556d263ca84035b946964e7458209e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473274436 2024-12-06T08:21:17,581 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2908b69a03d7457f9a69850c01b6d9c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733473276594 2024-12-06T08:21:17,587 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
4407da04b0ac162131052d3fbc4bef2c#C#compaction#513 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:17,587 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/cdb6e07c3e174c919a4f3f3271adfd5c is 50, key is test_row_0/C:col10/1733473276594/Put/seqid=0 2024-12-06T08:21:17,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742432_1608 (size=12595) 2024-12-06T08:21:17,596 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/cdb6e07c3e174c919a4f3f3271adfd5c as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/cdb6e07c3e174c919a4f3f3271adfd5c 2024-12-06T08:21:17,601 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into cdb6e07c3e174c919a4f3f3271adfd5c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:21:17,601 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:17,601 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473277120; duration=0sec 2024-12-06T08:21:17,602 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:17,602 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:17,602 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/8929ca1b50ee407db80b7a94f1e16bae as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/8929ca1b50ee407db80b7a94f1e16bae 2024-12-06T08:21:17,606 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into 8929ca1b50ee407db80b7a94f1e16bae(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
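
On the client side, the earlier RpcRetryingCallerImpl entry (tries=6, retries=16) shows the AcidGuaranteesTestTool writer simply retrying the rejected put until the flush and compactions above free memstore space. A sketch of an equivalent standalone put with an explicit retry budget, assuming the standard HBase 2.x client API; the retry settings and cell values are illustrative, not this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Retry budget that absorbs RegionTooBusyException rejections while a
        // flush drains the memstore; values are illustrative.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L); // base pause in ms; backoff applies
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RpcRetryingCallerImpl retries internally; the call only fails after
          // the retry budget is exhausted, as in the "tries=6, retries=16" entry.
          table.put(put);
        }
      }
    }
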
2024-12-06T08:21:17,606 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:17,606 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473277119; duration=0sec 2024-12-06T08:21:17,606 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:17,606 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:17,639 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/0b33bed025074e16ba1eeeeb7752348b 2024-12-06T08:21:17,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/c6700d97994a440da3556fee8f7f71b7 is 50, key is test_row_0/B:col10/1733473276635/Put/seqid=0 2024-12-06T08:21:17,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742433_1609 (size=12151) 2024-12-06T08:21:17,650 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/c6700d97994a440da3556fee8f7f71b7 2024-12-06T08:21:17,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/aec89d3724a44f379a0ee5598908be3a is 50, key is test_row_0/C:col10/1733473276635/Put/seqid=0 2024-12-06T08:21:17,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742434_1610 (size=12151) 2024-12-06T08:21:17,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473337773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473337867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473337869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:17,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:17,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473337870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,060 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/aec89d3724a44f379a0ee5598908be3a 2024-12-06T08:21:18,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/0b33bed025074e16ba1eeeeb7752348b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b 2024-12-06T08:21:18,068 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b, entries=150, sequenceid=210, filesize=30.4 K 2024-12-06T08:21:18,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/c6700d97994a440da3556fee8f7f71b7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/c6700d97994a440da3556fee8f7f71b7 2024-12-06T08:21:18,071 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/c6700d97994a440da3556fee8f7f71b7, entries=150, sequenceid=210, filesize=11.9 K 2024-12-06T08:21:18,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/aec89d3724a44f379a0ee5598908be3a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/aec89d3724a44f379a0ee5598908be3a 2024-12-06T08:21:18,075 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/aec89d3724a44f379a0ee5598908be3a, entries=150, sequenceid=210, filesize=11.9 K 2024-12-06T08:21:18,076 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 4407da04b0ac162131052d3fbc4bef2c in 861ms, sequenceid=210, compaction requested=false 2024-12-06T08:21:18,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:18,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:18,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-06T08:21:18,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-06T08:21:18,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-06T08:21:18,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6260 sec 2024-12-06T08:21:18,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.6300 sec 2024-12-06T08:21:18,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:18,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-06T08:21:18,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:18,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:18,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:18,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:18,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:18,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:18,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120604717e596ef84543a01c4db8e6a57fe1_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:18,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742435_1611 (size=14794) 2024-12-06T08:21:18,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473338451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473338452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473338453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-06T08:21:18,554 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-06T08:21:18,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:18,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-06T08:21:18,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T08:21:18,557 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:18,557 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:18,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:18,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473338556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473338557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473338556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T08:21:18,709 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T08:21:18,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:18,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:18,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:18,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:18,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:18,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473338759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473338759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473338759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:18,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473338776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,794 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:18,797 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120604717e596ef84543a01c4db8e6a57fe1_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120604717e596ef84543a01c4db8e6a57fe1_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:18,798 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/31cb6fa2f6714862be7a8ee2ed9f16e0, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:18,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/31cb6fa2f6714862be7a8ee2ed9f16e0 is 175, key is test_row_0/A:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:18,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742436_1612 (size=39749) 2024-12-06T08:21:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T08:21:18,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:18,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T08:21:18,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:18,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:18,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:18,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:18,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:18,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T08:21:19,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
as already flushing 2024-12-06T08:21:19,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473339062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473339063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473339063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T08:21:19,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T08:21:19,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:19,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:19,204 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=226, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/31cb6fa2f6714862be7a8ee2ed9f16e0 2024-12-06T08:21:19,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/78d0c0c9fe63425baf72f02d439a4d5a is 50, key is test_row_0/B:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:19,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742437_1613 (size=12151) 2024-12-06T08:21:19,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/78d0c0c9fe63425baf72f02d439a4d5a 2024-12-06T08:21:19,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/112766ca3d804c73a713772de7696262 is 50, key is test_row_0/C:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:19,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742438_1614 (size=12151) 2024-12-06T08:21:19,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/112766ca3d804c73a713772de7696262 2024-12-06T08:21:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/31cb6fa2f6714862be7a8ee2ed9f16e0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0 2024-12-06T08:21:19,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0, entries=200, sequenceid=226, filesize=38.8 K 2024-12-06T08:21:19,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/78d0c0c9fe63425baf72f02d439a4d5a as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/78d0c0c9fe63425baf72f02d439a4d5a 2024-12-06T08:21:19,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/78d0c0c9fe63425baf72f02d439a4d5a, entries=150, sequenceid=226, filesize=11.9 K 2024-12-06T08:21:19,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/112766ca3d804c73a713772de7696262 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/112766ca3d804c73a713772de7696262 2024-12-06T08:21:19,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/112766ca3d804c73a713772de7696262, entries=150, sequenceid=226, filesize=11.9 K 2024-12-06T08:21:19,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 4407da04b0ac162131052d3fbc4bef2c in 872ms, sequenceid=226, compaction requested=true 2024-12-06T08:21:19,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:19,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:19,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:19,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:19,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:19,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:19,245 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:19,245 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:19,245 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:19,246 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,246 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,246 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/8929ca1b50ee407db80b7a94f1e16bae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=100.0 K 2024-12-06T08:21:19,246 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:19,246 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ffafc622a8f24b3990ed739e43bc394d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/c6700d97994a440da3556fee8f7f71b7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/78d0c0c9fe63425baf72f02d439a4d5a] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.0 K 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/8929ca1b50ee407db80b7a94f1e16bae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0] 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ffafc622a8f24b3990ed739e43bc394d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733473276594 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8929ca1b50ee407db80b7a94f1e16bae, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733473276594 2024-12-06T08:21:19,246 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting c6700d97994a440da3556fee8f7f71b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733473276630 2024-12-06T08:21:19,247 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b33bed025074e16ba1eeeeb7752348b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733473276630 2024-12-06T08:21:19,247 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 78d0c0c9fe63425baf72f02d439a4d5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733473277258 2024-12-06T08:21:19,247 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31cb6fa2f6714862be7a8ee2ed9f16e0, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733473277258 2024-12-06T08:21:19,252 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 
2024-12-06T08:21:19,253 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:19,254 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/d3b9460d2f4b436bb24bf5901379af9e is 50, key is test_row_0/B:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:19,261 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412062434e48d9ca84f7491658af79101e473_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:19,262 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412062434e48d9ca84f7491658af79101e473_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:19,263 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062434e48d9ca84f7491658af79101e473_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:19,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742439_1615 (size=12697) 2024-12-06T08:21:19,269 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/d3b9460d2f4b436bb24bf5901379af9e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/d3b9460d2f4b436bb24bf5901379af9e 2024-12-06T08:21:19,273 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into d3b9460d2f4b436bb24bf5901379af9e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:19,273 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:19,273 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473279245; duration=0sec 2024-12-06T08:21:19,273 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:19,273 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:19,273 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:19,274 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:19,274 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:19,274 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,274 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/cdb6e07c3e174c919a4f3f3271adfd5c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/aec89d3724a44f379a0ee5598908be3a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/112766ca3d804c73a713772de7696262] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.0 K 2024-12-06T08:21:19,275 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting cdb6e07c3e174c919a4f3f3271adfd5c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733473276594 2024-12-06T08:21:19,275 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting aec89d3724a44f379a0ee5598908be3a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733473276630 2024-12-06T08:21:19,275 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 112766ca3d804c73a713772de7696262, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733473277258 2024-12-06T08:21:19,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 
is added to blk_1073742440_1616 (size=4469) 2024-12-06T08:21:19,280 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#520 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:19,280 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/693c0707b9fb4d538d22b64586a09dfd is 175, key is test_row_0/A:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:19,285 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#C#compaction#521 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:19,285 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2f98dcb1c42647e1948d027123f2ca43 is 50, key is test_row_0/C:col10/1733473278372/Put/seqid=0 2024-12-06T08:21:19,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742442_1618 (size=12697) 2024-12-06T08:21:19,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742441_1617 (size=31651) 2024-12-06T08:21:19,298 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/693c0707b9fb4d538d22b64586a09dfd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/693c0707b9fb4d538d22b64586a09dfd 2024-12-06T08:21:19,302 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into 693c0707b9fb4d538d22b64586a09dfd(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:19,302 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:19,302 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473279245; duration=0sec 2024-12-06T08:21:19,302 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:19,302 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:19,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,323 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:19,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:19,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e69c1978c6ec4639bdfbc66b04b8ef7a_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473278451/Put/seqid=0 2024-12-06T08:21:19,333 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742443_1619 (size=12304) 2024-12-06T08:21:19,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:19,336 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e69c1978c6ec4639bdfbc66b04b8ef7a_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e69c1978c6ec4639bdfbc66b04b8ef7a_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:19,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/d40032f5a4ec4d0a9702b89553b89b20, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:19,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/d40032f5a4ec4d0a9702b89553b89b20 is 175, key is test_row_0/A:col10/1733473278451/Put/seqid=0 2024-12-06T08:21:19,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742444_1620 (size=31105) 2024-12-06T08:21:19,345 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/d40032f5a4ec4d0a9702b89553b89b20 2024-12-06T08:21:19,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/7283842bae524cb09284982a51a53874 is 50, key is test_row_0/B:col10/1733473278451/Put/seqid=0 2024-12-06T08:21:19,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742445_1621 (size=12151) 2024-12-06T08:21:19,355 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/7283842bae524cb09284982a51a53874 2024-12-06T08:21:19,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ac80817d29ff4ca59ad57a7e10cd2428 is 50, key is test_row_0/C:col10/1733473278451/Put/seqid=0 2024-12-06T08:21:19,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742446_1622 (size=12151) 2024-12-06T08:21:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:19,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:19,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473339576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473339576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473339576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T08:21:19,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473339680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473339680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473339680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,694 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/2f98dcb1c42647e1948d027123f2ca43 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2f98dcb1c42647e1948d027123f2ca43 2024-12-06T08:21:19,698 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into 2f98dcb1c42647e1948d027123f2ca43(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:19,698 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:19,698 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473279245; duration=0sec 2024-12-06T08:21:19,699 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:19,699 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:19,770 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ac80817d29ff4ca59ad57a7e10cd2428 2024-12-06T08:21:19,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/d40032f5a4ec4d0a9702b89553b89b20 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20 2024-12-06T08:21:19,777 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20, entries=150, sequenceid=252, filesize=30.4 K 2024-12-06T08:21:19,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/7283842bae524cb09284982a51a53874 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7283842bae524cb09284982a51a53874 2024-12-06T08:21:19,781 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7283842bae524cb09284982a51a53874, entries=150, sequenceid=252, filesize=11.9 K 2024-12-06T08:21:19,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ac80817d29ff4ca59ad57a7e10cd2428 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac80817d29ff4ca59ad57a7e10cd2428 2024-12-06T08:21:19,788 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac80817d29ff4ca59ad57a7e10cd2428, entries=150, sequenceid=252, filesize=11.9 K 2024-12-06T08:21:19,789 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 4407da04b0ac162131052d3fbc4bef2c in 466ms, sequenceid=252, compaction requested=false 2024-12-06T08:21:19,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:19,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:19,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-06T08:21:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-06T08:21:19,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-06T08:21:19,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2320 sec 2024-12-06T08:21:19,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.2370 sec 2024-12-06T08:21:19,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:19,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-06T08:21:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:19,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063deed36ad1604a68a8774221ee60139f_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:19,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473339904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473339907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473339908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:19,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742447_1623 (size=12454) 2024-12-06T08:21:19,920 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:19,923 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412063deed36ad1604a68a8774221ee60139f_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063deed36ad1604a68a8774221ee60139f_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:19,924 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fbe58eeb5e5c4e7c863d41fb6a5cac92, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:19,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fbe58eeb5e5c4e7c863d41fb6a5cac92 is 175, key is test_row_0/A:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:19,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742448_1624 (size=31255) 2024-12-06T08:21:20,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473340008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473340011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473340011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473340212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473340214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473340215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,329 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=267, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fbe58eeb5e5c4e7c863d41fb6a5cac92 2024-12-06T08:21:20,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/e9b313d0fc854658b645176b4563b590 is 50, key is test_row_0/B:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:20,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742449_1625 (size=12301) 2024-12-06T08:21:20,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473340515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473340516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473340517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-06T08:21:20,661 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-06T08:21:20,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:20,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-06T08:21:20,664 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:20,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T08:21:20,664 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:20,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:20,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/e9b313d0fc854658b645176b4563b590 2024-12-06T08:21:20,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/69d01f22ec614d83bb08c922c4d51a4d is 50, key is test_row_0/C:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:20,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742450_1626 (size=12301) 2024-12-06T08:21:20,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T08:21:20,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473340785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,787 DEBUG [Thread-2435 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:21:20,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T08:21:20,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:20,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:20,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:20,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:20,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:20,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:20,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T08:21:20,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T08:21:20,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:20,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:20,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:20,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:20,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:20,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:21,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:21,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473341020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:21,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473341021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:21,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473341023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T08:21:21,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:21,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:21,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:21,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:21,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:21,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:21,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/69d01f22ec614d83bb08c922c4d51a4d 2024-12-06T08:21:21,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fbe58eeb5e5c4e7c863d41fb6a5cac92 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92 2024-12-06T08:21:21,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92, entries=150, sequenceid=267, filesize=30.5 K 2024-12-06T08:21:21,162 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(992): StoreScanner already has the close lock. There is no need to updateReaders 2024-12-06T08:21:21,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/e9b313d0fc854658b645176b4563b590 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/e9b313d0fc854658b645176b4563b590 2024-12-06T08:21:21,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/e9b313d0fc854658b645176b4563b590, entries=150, sequenceid=267, filesize=12.0 K 2024-12-06T08:21:21,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/69d01f22ec614d83bb08c922c4d51a4d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/69d01f22ec614d83bb08c922c4d51a4d 2024-12-06T08:21:21,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/69d01f22ec614d83bb08c922c4d51a4d, entries=150, sequenceid=267, filesize=12.0 K 2024-12-06T08:21:21,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 4407da04b0ac162131052d3fbc4bef2c in 1286ms, sequenceid=267, compaction requested=true 2024-12-06T08:21:21,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:21,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 
2024-12-06T08:21:21,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:21,170 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:21,170 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:21,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:21,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:21,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:21,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:21,171 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94011 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:21,171 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:21,171 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:21,171 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:21,172 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:21,172 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:21,172 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/693c0707b9fb4d538d22b64586a09dfd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=91.8 K 2024-12-06T08:21:21,172 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/d3b9460d2f4b436bb24bf5901379af9e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7283842bae524cb09284982a51a53874, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/e9b313d0fc854658b645176b4563b590] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.3 K 2024-12-06T08:21:21,172 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:21,172 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/693c0707b9fb4d538d22b64586a09dfd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92] 2024-12-06T08:21:21,172 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting d3b9460d2f4b436bb24bf5901379af9e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733473277258 2024-12-06T08:21:21,172 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 693c0707b9fb4d538d22b64586a09dfd, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733473277258 2024-12-06T08:21:21,172 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7283842bae524cb09284982a51a53874, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733473278446 2024-12-06T08:21:21,172 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting d40032f5a4ec4d0a9702b89553b89b20, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733473278446 2024-12-06T08:21:21,173 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting e9b313d0fc854658b645176b4563b590, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733473279572 2024-12-06T08:21:21,173 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbe58eeb5e5c4e7c863d41fb6a5cac92, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733473279572 2024-12-06T08:21:21,179 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:21,180 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/a7d63bf6a8a04eefbc7be8f1cbb5935d is 50, key is test_row_0/B:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:21,181 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:21,186 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206350e7320237b45fe8f3ee7fdc044826a_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:21,188 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206350e7320237b45fe8f3ee7fdc044826a_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:21,189 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206350e7320237b45fe8f3ee7fdc044826a_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:21,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742451_1627 (size=12949) 2024-12-06T08:21:21,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742452_1628 (size=4469) 2024-12-06T08:21:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T08:21:21,273 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-06T08:21:21,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:21,274 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-06T08:21:21,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:21,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:21,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:21,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:21,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:21,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:21,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120681874dd0739445eba47c734e58b0520d_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473279906/Put/seqid=0 2024-12-06T08:21:21,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742453_1629 (size=12454) 2024-12-06T08:21:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:21,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:21,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473341486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:21,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473341589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:21,600 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/a7d63bf6a8a04eefbc7be8f1cbb5935d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/a7d63bf6a8a04eefbc7be8f1cbb5935d 2024-12-06T08:21:21,603 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#529 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:21,604 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into a7d63bf6a8a04eefbc7be8f1cbb5935d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:21,604 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:21,604 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/5039d83e4e7c4625b15fae98debba9c6 is 175, key is test_row_0/A:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:21,604 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473281170; duration=0sec 2024-12-06T08:21:21,604 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:21,604 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:21,604 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:21,605 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:21,605 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:21,605 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:21,605 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2f98dcb1c42647e1948d027123f2ca43, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac80817d29ff4ca59ad57a7e10cd2428, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/69d01f22ec614d83bb08c922c4d51a4d] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.3 K 2024-12-06T08:21:21,606 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f98dcb1c42647e1948d027123f2ca43, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733473277258 2024-12-06T08:21:21,606 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting ac80817d29ff4ca59ad57a7e10cd2428, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733473278446 2024-12-06T08:21:21,607 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 69d01f22ec614d83bb08c922c4d51a4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733473279572 2024-12-06T08:21:21,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742454_1630 (size=31903) 2024-12-06T08:21:21,612 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#C#compaction#531 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:21,613 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/b7afb6961b864e82baed2dfeced0e002 is 50, key is test_row_0/C:col10/1733473279884/Put/seqid=0 2024-12-06T08:21:21,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742455_1631 (size=12949) 2024-12-06T08:21:21,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:21,688 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120681874dd0739445eba47c734e58b0520d_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120681874dd0739445eba47c734e58b0520d_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3d0dd70a30834edbbf12d680793ee44d, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3d0dd70a30834edbbf12d680793ee44d is 175, key is test_row_0/A:col10/1733473279906/Put/seqid=0 2024-12-06T08:21:21,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742456_1632 (size=31255) 2024-12-06T08:21:21,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T08:21:21,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:21,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473341792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,012 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/5039d83e4e7c4625b15fae98debba9c6 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/5039d83e4e7c4625b15fae98debba9c6 2024-12-06T08:21:22,016 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into 5039d83e4e7c4625b15fae98debba9c6(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:22,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:22,016 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473281170; duration=0sec 2024-12-06T08:21:22,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:22,016 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:22,020 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/b7afb6961b864e82baed2dfeced0e002 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b7afb6961b864e82baed2dfeced0e002 2024-12-06T08:21:22,023 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into b7afb6961b864e82baed2dfeced0e002(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:21:22,023 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:22,023 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473281171; duration=0sec 2024-12-06T08:21:22,023 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:22,024 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:22,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473342025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473342026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473342033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,093 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3d0dd70a30834edbbf12d680793ee44d 2024-12-06T08:21:22,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473342095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/b871d4cfb65b4cca94935bb445e1d5d4 is 50, key is test_row_0/B:col10/1733473279906/Put/seqid=0 2024-12-06T08:21:22,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742457_1633 (size=12301) 2024-12-06T08:21:22,107 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/b871d4cfb65b4cca94935bb445e1d5d4 2024-12-06T08:21:22,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/01380b6ffdcf4844bcd3f75eb602f159 is 50, key is test_row_0/C:col10/1733473279906/Put/seqid=0 2024-12-06T08:21:22,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742458_1634 (size=12301) 2024-12-06T08:21:22,518 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/01380b6ffdcf4844bcd3f75eb602f159 2024-12-06T08:21:22,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3d0dd70a30834edbbf12d680793ee44d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d 
2024-12-06T08:21:22,525 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d, entries=150, sequenceid=290, filesize=30.5 K 2024-12-06T08:21:22,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/b871d4cfb65b4cca94935bb445e1d5d4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/b871d4cfb65b4cca94935bb445e1d5d4 2024-12-06T08:21:22,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,533 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/b871d4cfb65b4cca94935bb445e1d5d4, entries=150, sequenceid=290, filesize=12.0 K 2024-12-06T08:21:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/01380b6ffdcf4844bcd3f75eb602f159 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/01380b6ffdcf4844bcd3f75eb602f159 2024-12-06T08:21:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,537 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/01380b6ffdcf4844bcd3f75eb602f159, entries=150, sequenceid=290, filesize=12.0 K 2024-12-06T08:21:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:21:22,538 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4407da04b0ac162131052d3fbc4bef2c in 1264ms, sequenceid=290, compaction requested=false 2024-12-06T08:21:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:22,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-06T08:21:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-06T08:21:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-06T08:21:22,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8750 sec 2024-12-06T08:21:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,541 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.8790 sec 2024-12-06T08:21:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,544 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,567 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,570 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,573 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,576 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,581 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,585 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,588 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,590 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,593 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,596 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,599 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:22,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-06T08:21:22,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,602 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:22,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:22,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:22,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:22,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:22,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067cd0c7cc584d4e61870ee26f76863790_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:22,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742459_1635 (size=12454) 2024-12-06T08:21:22,613 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:22,617 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412067cd0c7cc584d4e61870ee26f76863790_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067cd0c7cc584d4e61870ee26f76863790_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:22,618 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/75f37378b13241bf91f51141b6c5261e, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:22,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/75f37378b13241bf91f51141b6c5261e is 175, key is test_row_0/A:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:22,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742460_1636 (size=31255) 2024-12-06T08:21:22,630 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=307, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/75f37378b13241bf91f51141b6c5261e 2024-12-06T08:21:22,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/211ec5fe2b9b4d5d91b2dc13d672ab2b is 50, key is test_row_0/B:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:22,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742461_1637 (size=12301) 2024-12-06T08:21:22,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473342657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473342760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-06T08:21:22,771 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-06T08:21:22,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-06T08:21:22,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-06T08:21:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:22,774 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:21:22,775 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:21:22,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:21:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:22,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:22,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T08:21:22,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
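The flush activity recorded above (procId 175 reported as completed, then pid=177 stored as a FlushTableProcedure and polled with "Checking to see if procedure is done") is driven by administrative flush requests against the TestAcidGuarantees table. A minimal client-side sketch of issuing such a flush follows; the table name mirrors the log, but the code is illustrative and is not taken from the test itself.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush request to the master and returns once the
      // corresponding procedure completes, which is the client-side counterpart of
      // the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: ...
      // completed" lines in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```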
2024-12-06T08:21:22,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:22,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:22,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:22,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:21:22,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:22,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:22,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473342962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/211ec5fe2b9b4d5d91b2dc13d672ab2b 2024-12-06T08:21:23,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/47711c65ad924856964ca3aa9fd6b3ae is 50, key is test_row_0/C:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:23,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742462_1638 (size=12301) 2024-12-06T08:21:23,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:23,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T08:21:23,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:23,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
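The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") are the region server blocking writes while the region's memstore is above its blocking size; callers are expected to back off and retry until the in-progress flush frees memory. Below is a rough sketch of a writer that relies on the standard HBase client retry machinery; the row, family, and qualifier names echo the log, while the retry settings are illustrative assumptions rather than values from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry knobs: the client retries retriable exceptions such as
    // RegionTooBusyException up to this many attempts, pausing between them.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100L);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region stays over its blocking memstore size for the whole retry
      // budget, the put ultimately fails with an IOException whose cause chain
      // carries the RegionTooBusyException seen in the log.
      table.put(put);
    }
  }
}
```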
2024-12-06T08:21:23,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,231 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T08:21:23,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:23,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:23,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473343266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:23,384 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T08:21:23,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:23,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
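The 512.0 K figure in these exceptions is the region's blocking memstore size, which is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; a test like this presumably runs with a deliberately small flush size so the limit is reached quickly. The sketch below only shows how the two knobs combine; the 128 KB flush size and multiplier of 4 are assumptions chosen to reproduce the 512 KB limit, not settings read from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking size = flush size * block multiplier. With an (assumed) 128 KB flush
    // size and the default multiplier of 4, writes block once a region's memstore
    // reaches 512 KB, matching the "Over memstore limit=512.0 K" messages.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size: " + blockingSize + " bytes"); // 524288
  }
}
```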
2024-12-06T08:21:23,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:21:23,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/47711c65ad924856964ca3aa9fd6b3ae 2024-12-06T08:21:23,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/75f37378b13241bf91f51141b6c5261e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e 2024-12-06T08:21:23,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e, entries=150, sequenceid=307, filesize=30.5 K 2024-12-06T08:21:23,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/211ec5fe2b9b4d5d91b2dc13d672ab2b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/211ec5fe2b9b4d5d91b2dc13d672ab2b 2024-12-06T08:21:23,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/211ec5fe2b9b4d5d91b2dc13d672ab2b, entries=150, sequenceid=307, filesize=12.0 K 2024-12-06T08:21:23,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/47711c65ad924856964ca3aa9fd6b3ae as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/47711c65ad924856964ca3aa9fd6b3ae 2024-12-06T08:21:23,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/47711c65ad924856964ca3aa9fd6b3ae, entries=150, sequenceid=307, filesize=12.0 K 2024-12-06T08:21:23,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4407da04b0ac162131052d3fbc4bef2c in 865ms, sequenceid=307, compaction requested=true 2024-12-06T08:21:23,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:23,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:23,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:23,467 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:23,467 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:23,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:23,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:23,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:23,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:23,468 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:23,468 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:23,468 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:23,468 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:23,468 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,468 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
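The "Exploring compaction algorithm has selected 3 files of size 37551 ... with 1 in ratio" entries come from the ExploringCompactionPolicy, which only accepts a candidate set whose files are "in ratio": no file may be larger than the combined size of the remaining files times the compaction ratio (1.2 by default). The stand-alone sketch below restates that rule for illustration; it is not the HBase implementation, and the sample sizes only approximate the three B-family files listed in the log.

```java
import java.util.List;

public class FilesInRatioSketch {
  // A candidate set is "in ratio" when every file's size is at most
  // (total size of the other files in the set) * ratio.
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three B-family store files from the log
    // (~12.6 K + 12.0 K + 12.0 K, 37551 bytes in total).
    System.out.println(filesInRatio(List.of(12_949L, 12_301L, 12_301L), 1.2)); // true
  }
}
```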
2024-12-06T08:21:23,469 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/5039d83e4e7c4625b15fae98debba9c6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=92.2 K 2024-12-06T08:21:23,469 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/a7d63bf6a8a04eefbc7be8f1cbb5935d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/b871d4cfb65b4cca94935bb445e1d5d4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/211ec5fe2b9b4d5d91b2dc13d672ab2b] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.7 K 2024-12-06T08:21:23,469 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,469 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/5039d83e4e7c4625b15fae98debba9c6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e] 2024-12-06T08:21:23,469 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting a7d63bf6a8a04eefbc7be8f1cbb5935d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733473279572 2024-12-06T08:21:23,469 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5039d83e4e7c4625b15fae98debba9c6, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733473279572 2024-12-06T08:21:23,469 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b871d4cfb65b4cca94935bb445e1d5d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473279902 2024-12-06T08:21:23,469 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d0dd70a30834edbbf12d680793ee44d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473279902 2024-12-06T08:21:23,470 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 211ec5fe2b9b4d5d91b2dc13d672ab2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733473281467 2024-12-06T08:21:23,470 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75f37378b13241bf91f51141b6c5261e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733473281467 2024-12-06T08:21:23,475 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#537 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:23,476 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/fc43e092d0fa4d80841da15afb76fb77 is 50, key is test_row_0/B:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:23,478 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:23,480 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206f5df60bdeb184d86b0679a40e58364d5_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:23,482 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206f5df60bdeb184d86b0679a40e58364d5_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:23,482 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206f5df60bdeb184d86b0679a40e58364d5_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:23,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742463_1639 (size=13051) 2024-12-06T08:21:23,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742464_1640 (size=4469) 2024-12-06T08:21:23,487 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#538 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:23,487 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fb6f5ddb2f0a4b589364015ed7e4ff20 is 175, key is test_row_0/A:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:23,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742465_1641 (size=32005) 2024-12-06T08:21:23,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38041 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:23,537 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:23,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069f2321e93cb64948b2e8799cfdb0d6fe_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473282656/Put/seqid=0 2024-12-06T08:21:23,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742466_1642 (size=12454) 2024-12-06T08:21:23,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. as already flushing 2024-12-06T08:21:23,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:23,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:23,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473343806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:23,887 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/fc43e092d0fa4d80841da15afb76fb77 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fc43e092d0fa4d80841da15afb76fb77 2024-12-06T08:21:23,893 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into fc43e092d0fa4d80841da15afb76fb77(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:23,893 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:23,893 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473283467; duration=0sec 2024-12-06T08:21:23,893 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:23,893 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:23,893 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:23,894 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:23,894 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:23,894 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fb6f5ddb2f0a4b589364015ed7e4ff20 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fb6f5ddb2f0a4b589364015ed7e4ff20 2024-12-06T08:21:23,894 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
2024-12-06T08:21:23,895 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b7afb6961b864e82baed2dfeced0e002, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/01380b6ffdcf4844bcd3f75eb602f159, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/47711c65ad924856964ca3aa9fd6b3ae] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.7 K 2024-12-06T08:21:23,895 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting b7afb6961b864e82baed2dfeced0e002, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1733473279572 2024-12-06T08:21:23,895 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 01380b6ffdcf4844bcd3f75eb602f159, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733473279902 2024-12-06T08:21:23,896 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 47711c65ad924856964ca3aa9fd6b3ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733473281467 2024-12-06T08:21:23,899 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into fb6f5ddb2f0a4b589364015ed7e4ff20(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:21:23,899 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:23,899 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473283467; duration=0sec 2024-12-06T08:21:23,899 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:23,899 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:23,902 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#C#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:23,902 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/5058c25c504b41ab90f19b363e50fdb7 is 50, key is test_row_0/C:col10/1733473282601/Put/seqid=0 2024-12-06T08:21:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742467_1643 (size=13051) 2024-12-06T08:21:23,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:23,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473343909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:23,912 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/5058c25c504b41ab90f19b363e50fdb7 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/5058c25c504b41ab90f19b363e50fdb7 2024-12-06T08:21:23,916 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into 5058c25c504b41ab90f19b363e50fdb7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:23,916 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:23,916 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473283468; duration=0sec 2024-12-06T08:21:23,916 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:23,916 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:23,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:23,952 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412069f2321e93cb64948b2e8799cfdb0d6fe_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069f2321e93cb64948b2e8799cfdb0d6fe_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:23,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3cbf8592e347450e91a28635ceeac112, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:23,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3cbf8592e347450e91a28635ceeac112 is 175, key is test_row_0/A:col10/1733473282656/Put/seqid=0 2024-12-06T08:21:23,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742468_1644 (size=31255) 2024-12-06T08:21:24,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1733473344037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:24,038 DEBUG [Thread-2441 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:21:24,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37612 deadline: 1733473344045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:24,046 DEBUG [Thread-2443 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:21:24,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37572 deadline: 1733473344046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:24,048 DEBUG [Thread-2439 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:21:24,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473344112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:24,358 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=329, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3cbf8592e347450e91a28635ceeac112 2024-12-06T08:21:24,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/7038b3b0e4a44ae693ec05ac72125a84 is 50, key is test_row_0/B:col10/1733473282656/Put/seqid=0 2024-12-06T08:21:24,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742469_1645 (size=12301) 2024-12-06T08:21:24,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473344416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:24,768 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/7038b3b0e4a44ae693ec05ac72125a84 2024-12-06T08:21:24,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/28d0f32c69a747c295d8c5f5879796ad is 50, key is test_row_0/C:col10/1733473282656/Put/seqid=0 2024-12-06T08:21:24,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742470_1646 (size=12301) 2024-12-06T08:21:24,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37604 deadline: 1733473344813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:24,814 DEBUG [Thread-2435 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., hostname=b6b797fc3981,38041,1733473111442, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:21:24,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:24,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:24,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473344919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:25,186 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/28d0f32c69a747c295d8c5f5879796ad 2024-12-06T08:21:25,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/3cbf8592e347450e91a28635ceeac112 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112 2024-12-06T08:21:25,193 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112, entries=150, sequenceid=329, filesize=30.5 K 2024-12-06T08:21:25,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/7038b3b0e4a44ae693ec05ac72125a84 as 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7038b3b0e4a44ae693ec05ac72125a84 2024-12-06T08:21:25,197 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7038b3b0e4a44ae693ec05ac72125a84, entries=150, sequenceid=329, filesize=12.0 K 2024-12-06T08:21:25,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/28d0f32c69a747c295d8c5f5879796ad as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/28d0f32c69a747c295d8c5f5879796ad 2024-12-06T08:21:25,200 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/28d0f32c69a747c295d8c5f5879796ad, entries=150, sequenceid=329, filesize=12.0 K 2024-12-06T08:21:25,201 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 4407da04b0ac162131052d3fbc4bef2c in 1664ms, sequenceid=329, compaction requested=false 2024-12-06T08:21:25,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:25,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
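The WARN and stack-trace records above show Mutate calls from 172.17.0.2:37526 being rejected with RegionTooBusyException while the region's memstore sits over its 512.0 K blocking limit, and succeeding again once the MemStoreFlusher drains it. A minimal client-side sketch of the kind of put being rejected, with a manual backoff-and-retry loop (illustrative only; the table, family and row names follow the test, and in practice the HBase client already retries this exception internally, surfacing it to applications only after its own retry budget is exhausted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
                        table.put(put);
                        break;
                    } catch (RegionTooBusyException e) {
                        // Give the in-progress flush (visible in the log above) time to drain the memstore.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }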
2024-12-06T08:21:25,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-06T08:21:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-06T08:21:25,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-06T08:21:25,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4270 sec 2024-12-06T08:21:25,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.4320 sec 2024-12-06T08:21:25,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:25,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-06T08:21:25,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:25,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:25,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:25,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:25,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:25,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:25,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068ce9e77064d44f958da5bdbcc8e2c01a_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:25,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742471_1647 (size=12454) 2024-12-06T08:21:25,950 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:25,953 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412068ce9e77064d44f958da5bdbcc8e2c01a_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068ce9e77064d44f958da5bdbcc8e2c01a_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:25,954 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/2b6ad138e24346d1ade5d01146ad2cb5, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:25,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/2b6ad138e24346d1ade5d01146ad2cb5 is 175, key is test_row_0/A:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742472_1648 (size=31255) 2024-12-06T08:21:25,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473345990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:26,036 DEBUG [Thread-2448 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e757135 to 127.0.0.1:65195 2024-12-06T08:21:26,036 DEBUG [Thread-2448 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:26,037 DEBUG [Thread-2450 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:65195 2024-12-06T08:21:26,037 DEBUG [Thread-2450 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:26,038 DEBUG [Thread-2454 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:65195 2024-12-06T08:21:26,038 DEBUG [Thread-2454 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:26,038 DEBUG [Thread-2452 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:65195 2024-12-06T08:21:26,038 DEBUG [Thread-2452 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:26,038 DEBUG [Thread-2446 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x69abefea to 127.0.0.1:65195 2024-12-06T08:21:26,038 DEBUG [Thread-2446 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:26,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473346093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:26,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473346295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:26,360 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=347, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/2b6ad138e24346d1ade5d01146ad2cb5 2024-12-06T08:21:26,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/f4fb2f47bcab45d38709e1a3d21c3bbd is 50, key is test_row_0/B:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:26,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742473_1649 (size=12301) 2024-12-06T08:21:26,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:26,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473346597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:26,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/f4fb2f47bcab45d38709e1a3d21c3bbd 2024-12-06T08:21:26,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/8b7acaf316484e859c94bf7bfa4a7afc is 50, key is test_row_0/C:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:26,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742474_1650 (size=12301) 2024-12-06T08:21:26,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-06T08:21:26,878 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-06T08:21:27,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:21:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37526 deadline: 1733473347099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:27,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/8b7acaf316484e859c94bf7bfa4a7afc 2024-12-06T08:21:27,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/2b6ad138e24346d1ade5d01146ad2cb5 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5 2024-12-06T08:21:27,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5, entries=150, sequenceid=347, filesize=30.5 K 2024-12-06T08:21:27,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/f4fb2f47bcab45d38709e1a3d21c3bbd as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/f4fb2f47bcab45d38709e1a3d21c3bbd 2024-12-06T08:21:27,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/f4fb2f47bcab45d38709e1a3d21c3bbd, entries=150, sequenceid=347, filesize=12.0 K 2024-12-06T08:21:27,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/8b7acaf316484e859c94bf7bfa4a7afc as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/8b7acaf316484e859c94bf7bfa4a7afc 2024-12-06T08:21:27,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/8b7acaf316484e859c94bf7bfa4a7afc, entries=150, sequenceid=347, filesize=12.0 K 2024-12-06T08:21:27,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 4407da04b0ac162131052d3fbc4bef2c in 1251ms, sequenceid=347, compaction requested=true 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4407da04b0ac162131052d3fbc4bef2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-06T08:21:27,191 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:27,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:27,191 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:27,192 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:27,192 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:27,192 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/B is initiating minor compaction (all files) 2024-12-06T08:21:27,192 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/A is initiating minor compaction (all files) 2024-12-06T08:21:27,192 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/B in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
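The SortedCompactionPolicy / ExploringCompactionPolicy records above fire once each of the A, B and C stores holds 3 eligible files, against the "16 blocking" store-file ceiling also reported there. A rough sketch of the store-compaction settings those figures correspond to (the values shown are the usual defaults and an assumption here, not read from this test's configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is selected
            // (the policy above triggered once each store had 3 files).
            conf.setInt("hbase.hstore.compactionThreshold", 3);
            // Upper bound on the number of files merged in a single compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // The "16 blocking" figure: past this many store files, updates to the store are blocked.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println("compactionThreshold = " + conf.getInt("hbase.hstore.compactionThreshold", -1));
        }
    }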
2024-12-06T08:21:27,192 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/A in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:27,192 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fb6f5ddb2f0a4b589364015ed7e4ff20, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=92.3 K 2024-12-06T08:21:27,192 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fc43e092d0fa4d80841da15afb76fb77, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7038b3b0e4a44ae693ec05ac72125a84, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/f4fb2f47bcab45d38709e1a3d21c3bbd] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.8 K 2024-12-06T08:21:27,192 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
files: [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fb6f5ddb2f0a4b589364015ed7e4ff20, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5] 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting fc43e092d0fa4d80841da15afb76fb77, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733473281467 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb6f5ddb2f0a4b589364015ed7e4ff20, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733473281467 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 7038b3b0e4a44ae693ec05ac72125a84, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733473282634 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cbf8592e347450e91a28635ceeac112, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733473282634 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting f4fb2f47bcab45d38709e1a3d21c3bbd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733473283798 2024-12-06T08:21:27,193 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b6ad138e24346d1ade5d01146ad2cb5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733473283798 2024-12-06T08:21:27,199 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#B#compaction#546 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:27,199 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/00fb589296c74268aa05370e5138b85e is 50, key is test_row_0/B:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:27,203 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:27,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742475_1651 (size=13153) 2024-12-06T08:21:27,204 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241206c10380ea45b14cff9f84918d7edf9454_4407da04b0ac162131052d3fbc4bef2c store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:27,206 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/00fb589296c74268aa05370e5138b85e as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/00fb589296c74268aa05370e5138b85e 2024-12-06T08:21:27,210 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/B of 4407da04b0ac162131052d3fbc4bef2c into 00fb589296c74268aa05370e5138b85e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:27,210 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:27,210 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/B, priority=13, startTime=1733473287191; duration=0sec 2024-12-06T08:21:27,210 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-06T08:21:27,210 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:B 2024-12-06T08:21:27,210 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:21:27,211 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:21:27,211 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1540): 4407da04b0ac162131052d3fbc4bef2c/C is initiating minor compaction (all files) 2024-12-06T08:21:27,211 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4407da04b0ac162131052d3fbc4bef2c/C in TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:27,211 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/5058c25c504b41ab90f19b363e50fdb7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/28d0f32c69a747c295d8c5f5879796ad, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/8b7acaf316484e859c94bf7bfa4a7afc] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp, totalSize=36.8 K 2024-12-06T08:21:27,211 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 5058c25c504b41ab90f19b363e50fdb7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733473281467 2024-12-06T08:21:27,211 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 28d0f32c69a747c295d8c5f5879796ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733473282634 2024-12-06T08:21:27,212 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b7acaf316484e859c94bf7bfa4a7afc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733473283798 2024-12-06T08:21:27,221 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
4407da04b0ac162131052d3fbc4bef2c#C#compaction#548 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:27,221 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/0b82d35c9a5d4156a6c216de082c7b07 is 50, key is test_row_0/C:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:27,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742476_1652 (size=13153) 2024-12-06T08:21:27,229 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241206c10380ea45b14cff9f84918d7edf9454_4407da04b0ac162131052d3fbc4bef2c, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:27,229 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206c10380ea45b14cff9f84918d7edf9454_4407da04b0ac162131052d3fbc4bef2c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:27,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742477_1653 (size=4469) 2024-12-06T08:21:27,233 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/0b82d35c9a5d4156a6c216de082c7b07 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b82d35c9a5d4156a6c216de082c7b07 2024-12-06T08:21:27,234 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4407da04b0ac162131052d3fbc4bef2c#A#compaction#547 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:21:27,235 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fecd2fe8db69455b9be5701518b4eda3 is 175, key is test_row_0/A:col10/1733473283803/Put/seqid=0 2024-12-06T08:21:27,237 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/C of 4407da04b0ac162131052d3fbc4bef2c into 0b82d35c9a5d4156a6c216de082c7b07(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
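The PressureAwareThroughputController records report compaction writes throttled against a "total limit is 50.00 MB/second". That limit is interpolated between a lower and a higher bound as compaction pressure rises; the sketch below shows the two settings with what are, to the best of my knowledge, the common defaults (50 and 100 MB/s), which would be consistent with the 50 MB/s limit observed while pressure is low. Key names and values here are assumptions, not taken from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // With little compaction pressure the controller throttles near the lower bound;
            // as pressure rises the allowed rate moves toward the higher bound.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        }
    }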
2024-12-06T08:21:27,237 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:27,237 INFO [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/C, priority=13, startTime=1733473287191; duration=0sec 2024-12-06T08:21:27,237 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:27,237 DEBUG [RS:0;b6b797fc3981:38041-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:C 2024-12-06T08:21:27,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742478_1654 (size=32107) 2024-12-06T08:21:27,258 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/fecd2fe8db69455b9be5701518b4eda3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fecd2fe8db69455b9be5701518b4eda3 2024-12-06T08:21:27,261 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4407da04b0ac162131052d3fbc4bef2c/A of 4407da04b0ac162131052d3fbc4bef2c into fecd2fe8db69455b9be5701518b4eda3(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:21:27,261 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:27,261 INFO [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c., storeName=4407da04b0ac162131052d3fbc4bef2c/A, priority=13, startTime=1733473287191; duration=0sec 2024-12-06T08:21:27,262 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:21:27,262 DEBUG [RS:0;b6b797fc3981:38041-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4407da04b0ac162131052d3fbc4bef2c:A 2024-12-06T08:21:28,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38041 {}] regionserver.HRegion(8581): Flush requested on 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:28,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-06T08:21:28,070 DEBUG [Thread-2441 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:65195 2024-12-06T08:21:28,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:28,070 DEBUG [Thread-2441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:28,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:28,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:28,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:28,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:28,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:28,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e37ad8e2d7a24d0a921b9c1a4576fdf0_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473288069/Put/seqid=0 2024-12-06T08:21:28,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742479_1655 (size=12454) 2024-12-06T08:21:28,081 DEBUG [Thread-2439 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:65195 2024-12-06T08:21:28,081 DEBUG [Thread-2439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:28,086 DEBUG [Thread-2443 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:65195 2024-12-06T08:21:28,086 DEBUG [Thread-2443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:28,107 DEBUG [Thread-2437 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x537a66f8 to 127.0.0.1:65195 2024-12-06T08:21:28,107 DEBUG [Thread-2437 
{}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:28,479 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:28,482 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241206e37ad8e2d7a24d0a921b9c1a4576fdf0_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e37ad8e2d7a24d0a921b9c1a4576fdf0_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:28,483 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ccde208cc79349818b7e21340ebf5f90, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:28,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ccde208cc79349818b7e21340ebf5f90 is 175, key is test_row_0/A:col10/1733473288069/Put/seqid=0 2024-12-06T08:21:28,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742480_1656 (size=31255) 2024-12-06T08:21:28,887 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ccde208cc79349818b7e21340ebf5f90 2024-12-06T08:21:28,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ef6a782a961645528ce1ecb145c60226 is 50, key is test_row_0/B:col10/1733473288069/Put/seqid=0 2024-12-06T08:21:28,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742481_1657 (size=12301) 2024-12-06T08:21:29,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ef6a782a961645528ce1ecb145c60226 2024-12-06T08:21:29,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ac64868d23f049e59c7164293996f6e8 is 50, key is test_row_0/C:col10/1733473288069/Put/seqid=0 2024-12-06T08:21:29,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742482_1658 (size=12301) 
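Family A is flushed through HMobStore and DefaultMobStoreFlusher into the mobdir tree, i.e. it is a MOB-enabled column family, while B and C take the ordinary store-file path. For illustration only (the MOB threshold below is an assumed value, not taken from this test), a table shaped like this one could be declared as follows:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                // Cells in family A larger than the MOB threshold are written under the mobdir
                // tree seen in the log instead of the regular store files.
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                        .setMobEnabled(true)
                        .setMobThreshold(4L * 1024)   // 4 KB threshold, chosen only for illustration
                        .build());
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
                admin.createTable(table.build());
            }
        }
    }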
2024-12-06T08:21:29,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ac64868d23f049e59c7164293996f6e8 2024-12-06T08:21:29,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ccde208cc79349818b7e21340ebf5f90 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ccde208cc79349818b7e21340ebf5f90 2024-12-06T08:21:29,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ccde208cc79349818b7e21340ebf5f90, entries=150, sequenceid=372, filesize=30.5 K 2024-12-06T08:21:29,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/ef6a782a961645528ce1ecb145c60226 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ef6a782a961645528ce1ecb145c60226 2024-12-06T08:21:29,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ef6a782a961645528ce1ecb145c60226, entries=150, sequenceid=372, filesize=12.0 K 2024-12-06T08:21:29,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/ac64868d23f049e59c7164293996f6e8 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac64868d23f049e59c7164293996f6e8 2024-12-06T08:21:29,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac64868d23f049e59c7164293996f6e8, entries=150, sequenceid=372, filesize=12.0 K 2024-12-06T08:21:29,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=20.13 KB/20610 for 4407da04b0ac162131052d3fbc4bef2c in 1647ms, sequenceid=372, compaction requested=false 2024-12-06T08:21:29,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:29,738 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
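The 512.0 K figure in the RegionTooBusyException records is the region's blocking memstore size, i.e. the memstore flush size multiplied by the block multiplier; the test presumably runs with a deliberately tiny flush size so that this code path is exercised by the small flushes logged above. A sketch of the two settings involved, shown here with their production defaults rather than the test's values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this size (default 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Block new updates (RegionTooBusyException) once the memstore grows to
            // flush.size * block.multiplier while the flush is still in progress.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        }
    }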
2024-12-06T08:21:34,871 DEBUG [Thread-2435 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2209c520 to 127.0.0.1:65195 
2024-12-06T08:21:34,871 DEBUG [Thread-2435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7199 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7063 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6925 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7180 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7042 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 
2024-12-06T08:21:34,872 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x593af048 to 127.0.0.1:65195 
2024-12-06T08:21:34,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-06T08:21:34,872 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 
2024-12-06T08:21:34,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 
2024-12-06T08:21:34,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 
2024-12-06T08:21:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 
2024-12-06T08:21:34,875 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473294875"}]},"ts":"1733473294875"} 
2024-12-06T08:21:34,876 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 
2024-12-06T08:21:34,877 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 
2024-12-06T08:21:34,878 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 
2024-12-06T08:21:34,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, 
region=4407da04b0ac162131052d3fbc4bef2c, UNASSIGN}] 2024-12-06T08:21:34,879 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, UNASSIGN 2024-12-06T08:21:34,880 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=CLOSING, regionLocation=b6b797fc3981,38041,1733473111442 2024-12-06T08:21:34,880 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-06T08:21:34,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442}] 2024-12-06T08:21:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T08:21:35,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38041,1733473111442 2024-12-06T08:21:35,032 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:35,032 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-06T08:21:35,032 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing 4407da04b0ac162131052d3fbc4bef2c, disabling compactions & flushes 2024-12-06T08:21:35,032 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:35,032 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:35,032 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. after waiting 0 ms 2024-12-06T08:21:35,032 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 
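The pid=179 through pid=182 chain above (DisableTableProcedure, CloseTableRegionsProcedure, TransitRegionStateProcedure UNASSIGN, CloseRegionProcedure) is the master-side expansion of a single client call. In outline, the cleanup step that triggers it looks like this (a sketch, not the test tool's actual source):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits DisableTableProcedure on the master and blocks until every
                // region of the table has been unassigned and closed.
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
            }
        }
    }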
2024-12-06T08:21:35,032 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing 4407da04b0ac162131052d3fbc4bef2c 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-06T08:21:35,032 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=A 2024-12-06T08:21:35,033 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:35,033 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=B 2024-12-06T08:21:35,033 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:35,033 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4407da04b0ac162131052d3fbc4bef2c, store=C 2024-12-06T08:21:35,033 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-06T08:21:35,038 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062bd8b3f0d55046a798d9208adeb541dc_4407da04b0ac162131052d3fbc4bef2c is 50, key is test_row_0/A:col10/1733473288085/Put/seqid=0 2024-12-06T08:21:35,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742483_1659 (size=9914) 2024-12-06T08:21:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T08:21:35,442 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:21:35,445 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412062bd8b3f0d55046a798d9208adeb541dc_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062bd8b3f0d55046a798d9208adeb541dc_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:35,445 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab3307c7625f40099a7ddc1cdf54fb5d, store: [table=TestAcidGuarantees family=A region=4407da04b0ac162131052d3fbc4bef2c] 2024-12-06T08:21:35,446 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab3307c7625f40099a7ddc1cdf54fb5d is 175, key is test_row_0/A:col10/1733473288085/Put/seqid=0 2024-12-06T08:21:35,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742484_1660 (size=22561) 2024-12-06T08:21:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T08:21:35,849 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=379, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab3307c7625f40099a7ddc1cdf54fb5d 2024-12-06T08:21:35,855 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/18825f5db85542598d95805442454a4b is 50, key is test_row_0/B:col10/1733473288085/Put/seqid=0 2024-12-06T08:21:35,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742485_1661 (size=9857) 2024-12-06T08:21:35,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T08:21:36,258 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/18825f5db85542598d95805442454a4b 2024-12-06T08:21:36,263 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/929d46491df94f528871059d07340837 is 50, key is test_row_0/C:col10/1733473288085/Put/seqid=0 2024-12-06T08:21:36,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742486_1662 (size=9857) 2024-12-06T08:21:36,667 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=379 (bloomFilter=true), 
to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/929d46491df94f528871059d07340837 2024-12-06T08:21:36,670 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/A/ab3307c7625f40099a7ddc1cdf54fb5d as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab3307c7625f40099a7ddc1cdf54fb5d 2024-12-06T08:21:36,673 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab3307c7625f40099a7ddc1cdf54fb5d, entries=100, sequenceid=379, filesize=22.0 K 2024-12-06T08:21:36,674 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/B/18825f5db85542598d95805442454a4b as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/18825f5db85542598d95805442454a4b 2024-12-06T08:21:36,677 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/18825f5db85542598d95805442454a4b, entries=100, sequenceid=379, filesize=9.6 K 2024-12-06T08:21:36,677 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/.tmp/C/929d46491df94f528871059d07340837 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/929d46491df94f528871059d07340837 2024-12-06T08:21:36,680 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/929d46491df94f528871059d07340837, entries=100, sequenceid=379, filesize=9.6 K 2024-12-06T08:21:36,680 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 4407da04b0ac162131052d3fbc4bef2c in 1648ms, sequenceid=379, compaction requested=true 2024-12-06T08:21:36,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/45d62b2570b2467486294b0e29c77c53, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/4b0c5061967b45ba9d8c9edd59e4714b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e073010fde8343d297cd1bbec93f5845, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/8929ca1b50ee407db80b7a94f1e16bae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/693c0707b9fb4d538d22b64586a09dfd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/5039d83e4e7c4625b15fae98debba9c6, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fb6f5ddb2f0a4b589364015ed7e4ff20, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5] to archive 2024-12-06T08:21:36,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:21:36,683 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab007ed019fd43898977a21bd1debc66 2024-12-06T08:21:36,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/a8342f95e78c474bb5c9317a4f69e964 2024-12-06T08:21:36,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/45d62b2570b2467486294b0e29c77c53 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/45d62b2570b2467486294b0e29c77c53 2024-12-06T08:21:36,685 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/37b5637f9b8e4bb38257bb31fe035efd 2024-12-06T08:21:36,686 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b0ed8592a671457ea0761dbe20a85288 2024-12-06T08:21:36,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e35a2d5f235245b59b84c84c1e45c947 2024-12-06T08:21:36,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/4b0c5061967b45ba9d8c9edd59e4714b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/4b0c5061967b45ba9d8c9edd59e4714b 2024-12-06T08:21:36,688 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c2b9716f8fe94eb4962a97b9dfa853e6 2024-12-06T08:21:36,689 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b62f90107dd24591900de92ad95d8054 2024-12-06T08:21:36,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e073010fde8343d297cd1bbec93f5845 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e073010fde8343d297cd1bbec93f5845 2024-12-06T08:21:36,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/b28b6eadb21746e39db127ef40d5cb4e 2024-12-06T08:21:36,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/c55fc02dbe324ef18f8f00bfbbd573e5 2024-12-06T08:21:36,692 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/e9d72625beeb4e21ae05d0b589e6208e 2024-12-06T08:21:36,693 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/8929ca1b50ee407db80b7a94f1e16bae to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/8929ca1b50ee407db80b7a94f1e16bae 2024-12-06T08:21:36,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/0b33bed025074e16ba1eeeeb7752348b 2024-12-06T08:21:36,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/31cb6fa2f6714862be7a8ee2ed9f16e0 2024-12-06T08:21:36,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/693c0707b9fb4d538d22b64586a09dfd to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/693c0707b9fb4d538d22b64586a09dfd 2024-12-06T08:21:36,696 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/d40032f5a4ec4d0a9702b89553b89b20 2024-12-06T08:21:36,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/5039d83e4e7c4625b15fae98debba9c6 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/5039d83e4e7c4625b15fae98debba9c6 2024-12-06T08:21:36,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fbe58eeb5e5c4e7c863d41fb6a5cac92 2024-12-06T08:21:36,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3d0dd70a30834edbbf12d680793ee44d 2024-12-06T08:21:36,699 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fb6f5ddb2f0a4b589364015ed7e4ff20 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fb6f5ddb2f0a4b589364015ed7e4ff20 2024-12-06T08:21:36,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/75f37378b13241bf91f51141b6c5261e 2024-12-06T08:21:36,701 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/3cbf8592e347450e91a28635ceeac112 2024-12-06T08:21:36,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/2b6ad138e24346d1ade5d01146ad2cb5 2024-12-06T08:21:36,702 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ec8222fc8d8d4d3f8ff5a093a46f4e47, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/41507be46e49451c9a31a01524e8e524, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fb79320c97ce4333acfaf58abb5923fc, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0e37f1b1220d4f0d86cfe2eceb2e0d65, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/43ba1b94963e4925b5483436b7c53f6e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/acbf38b8f85f41b0913855bf8258a9de, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/483a7ce99114435abbcda92b4986f34c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/987b0d0448944fb997037b66a7e6891a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/bbf25487ca454d2089973d621d7332b9, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/51dffdebc3d04a5d92112d1659a04526, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/8fafcd8f0f8e40f1b05f45289b04c1bd, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0c23769881d74cdebe9286d4af6a96e5, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ffafc622a8f24b3990ed739e43bc394d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/6e3f299b3f4c4eaf907599be0e7d5c52, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/c6700d97994a440da3556fee8f7f71b7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/d3b9460d2f4b436bb24bf5901379af9e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/78d0c0c9fe63425baf72f02d439a4d5a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7283842bae524cb09284982a51a53874, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/a7d63bf6a8a04eefbc7be8f1cbb5935d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/e9b313d0fc854658b645176b4563b590, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/b871d4cfb65b4cca94935bb445e1d5d4, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fc43e092d0fa4d80841da15afb76fb77, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/211ec5fe2b9b4d5d91b2dc13d672ab2b, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7038b3b0e4a44ae693ec05ac72125a84, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/f4fb2f47bcab45d38709e1a3d21c3bbd] to archive 2024-12-06T08:21:36,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T08:21:36,704 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ec8222fc8d8d4d3f8ff5a093a46f4e47 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ec8222fc8d8d4d3f8ff5a093a46f4e47 2024-12-06T08:21:36,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/41507be46e49451c9a31a01524e8e524 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/41507be46e49451c9a31a01524e8e524 2024-12-06T08:21:36,706 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fb79320c97ce4333acfaf58abb5923fc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fb79320c97ce4333acfaf58abb5923fc 2024-12-06T08:21:36,707 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0e37f1b1220d4f0d86cfe2eceb2e0d65 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0e37f1b1220d4f0d86cfe2eceb2e0d65 2024-12-06T08:21:36,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/43ba1b94963e4925b5483436b7c53f6e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/43ba1b94963e4925b5483436b7c53f6e 2024-12-06T08:21:36,708 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/acbf38b8f85f41b0913855bf8258a9de to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/acbf38b8f85f41b0913855bf8258a9de 2024-12-06T08:21:36,709 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/483a7ce99114435abbcda92b4986f34c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/483a7ce99114435abbcda92b4986f34c 2024-12-06T08:21:36,710 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/987b0d0448944fb997037b66a7e6891a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/987b0d0448944fb997037b66a7e6891a 2024-12-06T08:21:36,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/bbf25487ca454d2089973d621d7332b9 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/bbf25487ca454d2089973d621d7332b9 2024-12-06T08:21:36,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/51dffdebc3d04a5d92112d1659a04526 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/51dffdebc3d04a5d92112d1659a04526 2024-12-06T08:21:36,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/8fafcd8f0f8e40f1b05f45289b04c1bd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/8fafcd8f0f8e40f1b05f45289b04c1bd 2024-12-06T08:21:36,713 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0c23769881d74cdebe9286d4af6a96e5 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/0c23769881d74cdebe9286d4af6a96e5 2024-12-06T08:21:36,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ffafc622a8f24b3990ed739e43bc394d to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ffafc622a8f24b3990ed739e43bc394d 2024-12-06T08:21:36,715 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/6e3f299b3f4c4eaf907599be0e7d5c52 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/6e3f299b3f4c4eaf907599be0e7d5c52 2024-12-06T08:21:36,716 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/c6700d97994a440da3556fee8f7f71b7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/c6700d97994a440da3556fee8f7f71b7 2024-12-06T08:21:36,716 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/d3b9460d2f4b436bb24bf5901379af9e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/d3b9460d2f4b436bb24bf5901379af9e 2024-12-06T08:21:36,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/78d0c0c9fe63425baf72f02d439a4d5a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/78d0c0c9fe63425baf72f02d439a4d5a 2024-12-06T08:21:36,718 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7283842bae524cb09284982a51a53874 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7283842bae524cb09284982a51a53874 2024-12-06T08:21:36,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/a7d63bf6a8a04eefbc7be8f1cbb5935d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/a7d63bf6a8a04eefbc7be8f1cbb5935d 2024-12-06T08:21:36,720 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/e9b313d0fc854658b645176b4563b590 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/e9b313d0fc854658b645176b4563b590 2024-12-06T08:21:36,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/b871d4cfb65b4cca94935bb445e1d5d4 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/b871d4cfb65b4cca94935bb445e1d5d4 2024-12-06T08:21:36,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fc43e092d0fa4d80841da15afb76fb77 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/fc43e092d0fa4d80841da15afb76fb77 2024-12-06T08:21:36,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/211ec5fe2b9b4d5d91b2dc13d672ab2b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/211ec5fe2b9b4d5d91b2dc13d672ab2b 2024-12-06T08:21:36,723 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7038b3b0e4a44ae693ec05ac72125a84 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/7038b3b0e4a44ae693ec05ac72125a84 2024-12-06T08:21:36,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/f4fb2f47bcab45d38709e1a3d21c3bbd to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/f4fb2f47bcab45d38709e1a3d21c3bbd 2024-12-06T08:21:36,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ed66a15171e04ce1a7696cd51a24de91, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2de6af495ee646ccaf47760f1b7fe041, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/820fe96ac1da45fd8ecb1ef997828832, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b629c79d7ae8405baa35dd0c3ce2f692, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/1eb445a634434dc9b80eaa431c7d23ee, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/c57492566d7b48dc80a2b8942804e809, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/4a79e27ab8d946349ba702acfc0733af, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b9602b5b43341d9a436fdb32868fb7a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/46fc128ee2d14b2486c07754e5a0d05a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/e41f0c8a8ae54608b77cc2209fd1fe92, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/dd10a9f2d0df4b0eacc2de9d58a84411, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/3b556d263ca84035b946964e7458209e, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/cdb6e07c3e174c919a4f3f3271adfd5c, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2908b69a03d7457f9a69850c01b6d9c2, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/aec89d3724a44f379a0ee5598908be3a, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2f98dcb1c42647e1948d027123f2ca43, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/112766ca3d804c73a713772de7696262, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac80817d29ff4ca59ad57a7e10cd2428, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b7afb6961b864e82baed2dfeced0e002, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/69d01f22ec614d83bb08c922c4d51a4d, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/01380b6ffdcf4844bcd3f75eb602f159, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/5058c25c504b41ab90f19b363e50fdb7, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/47711c65ad924856964ca3aa9fd6b3ae, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/28d0f32c69a747c295d8c5f5879796ad, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/8b7acaf316484e859c94bf7bfa4a7afc] to archive 2024-12-06T08:21:36,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:21:36,726 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ed66a15171e04ce1a7696cd51a24de91 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ed66a15171e04ce1a7696cd51a24de91 2024-12-06T08:21:36,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2de6af495ee646ccaf47760f1b7fe041 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2de6af495ee646ccaf47760f1b7fe041 2024-12-06T08:21:36,728 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/820fe96ac1da45fd8ecb1ef997828832 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/820fe96ac1da45fd8ecb1ef997828832 2024-12-06T08:21:36,728 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b629c79d7ae8405baa35dd0c3ce2f692 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b629c79d7ae8405baa35dd0c3ce2f692 2024-12-06T08:21:36,729 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/1eb445a634434dc9b80eaa431c7d23ee to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/1eb445a634434dc9b80eaa431c7d23ee 2024-12-06T08:21:36,730 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/c57492566d7b48dc80a2b8942804e809 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/c57492566d7b48dc80a2b8942804e809 2024-12-06T08:21:36,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/4a79e27ab8d946349ba702acfc0733af to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/4a79e27ab8d946349ba702acfc0733af 2024-12-06T08:21:36,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b9602b5b43341d9a436fdb32868fb7a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b9602b5b43341d9a436fdb32868fb7a 2024-12-06T08:21:36,732 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/46fc128ee2d14b2486c07754e5a0d05a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/46fc128ee2d14b2486c07754e5a0d05a 2024-12-06T08:21:36,733 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/e41f0c8a8ae54608b77cc2209fd1fe92 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/e41f0c8a8ae54608b77cc2209fd1fe92 2024-12-06T08:21:36,734 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/dd10a9f2d0df4b0eacc2de9d58a84411 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/dd10a9f2d0df4b0eacc2de9d58a84411 2024-12-06T08:21:36,734 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/3b556d263ca84035b946964e7458209e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/3b556d263ca84035b946964e7458209e 2024-12-06T08:21:36,735 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/cdb6e07c3e174c919a4f3f3271adfd5c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/cdb6e07c3e174c919a4f3f3271adfd5c 2024-12-06T08:21:36,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2908b69a03d7457f9a69850c01b6d9c2 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2908b69a03d7457f9a69850c01b6d9c2 2024-12-06T08:21:36,737 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/aec89d3724a44f379a0ee5598908be3a to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/aec89d3724a44f379a0ee5598908be3a 2024-12-06T08:21:36,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2f98dcb1c42647e1948d027123f2ca43 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/2f98dcb1c42647e1948d027123f2ca43 2024-12-06T08:21:36,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/112766ca3d804c73a713772de7696262 to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/112766ca3d804c73a713772de7696262 2024-12-06T08:21:36,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac80817d29ff4ca59ad57a7e10cd2428 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac80817d29ff4ca59ad57a7e10cd2428 2024-12-06T08:21:36,740 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b7afb6961b864e82baed2dfeced0e002 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/b7afb6961b864e82baed2dfeced0e002 2024-12-06T08:21:36,741 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/69d01f22ec614d83bb08c922c4d51a4d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/69d01f22ec614d83bb08c922c4d51a4d 2024-12-06T08:21:36,741 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/01380b6ffdcf4844bcd3f75eb602f159 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/01380b6ffdcf4844bcd3f75eb602f159 2024-12-06T08:21:36,742 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/5058c25c504b41ab90f19b363e50fdb7 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/5058c25c504b41ab90f19b363e50fdb7 2024-12-06T08:21:36,743 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/47711c65ad924856964ca3aa9fd6b3ae to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/47711c65ad924856964ca3aa9fd6b3ae 2024-12-06T08:21:36,744 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/28d0f32c69a747c295d8c5f5879796ad to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/28d0f32c69a747c295d8c5f5879796ad 2024-12-06T08:21:36,745 DEBUG [StoreCloser-TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/8b7acaf316484e859c94bf7bfa4a7afc to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/8b7acaf316484e859c94bf7bfa4a7afc 2024-12-06T08:21:36,748 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/recovered.edits/382.seqid, newMaxSeqId=382, maxSeqId=4 2024-12-06T08:21:36,749 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c. 2024-12-06T08:21:36,749 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for 4407da04b0ac162131052d3fbc4bef2c: 2024-12-06T08:21:36,750 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed 4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:36,750 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=4407da04b0ac162131052d3fbc4bef2c, regionState=CLOSED 2024-12-06T08:21:36,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-06T08:21:36,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure 4407da04b0ac162131052d3fbc4bef2c, server=b6b797fc3981,38041,1733473111442 in 1.8710 sec 2024-12-06T08:21:36,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-06T08:21:36,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4407da04b0ac162131052d3fbc4bef2c, UNASSIGN in 1.8740 sec 2024-12-06T08:21:36,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-06T08:21:36,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8750 sec 2024-12-06T08:21:36,755 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473296755"}]},"ts":"1733473296755"} 
2024-12-06T08:21:36,756 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-06T08:21:36,758 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-06T08:21:36,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8850 sec 2024-12-06T08:21:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-06T08:21:36,978 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-06T08:21:36,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-06T08:21:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:36,980 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-06T08:21:36,980 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:36,982 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:36,983 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C, FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/recovered.edits] 2024-12-06T08:21:36,985 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab3307c7625f40099a7ddc1cdf54fb5d to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ab3307c7625f40099a7ddc1cdf54fb5d 2024-12-06T08:21:36,986 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ccde208cc79349818b7e21340ebf5f90 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/ccde208cc79349818b7e21340ebf5f90 2024-12-06T08:21:36,987 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fecd2fe8db69455b9be5701518b4eda3 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/A/fecd2fe8db69455b9be5701518b4eda3 2024-12-06T08:21:36,989 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/00fb589296c74268aa05370e5138b85e to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/00fb589296c74268aa05370e5138b85e 2024-12-06T08:21:36,990 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/18825f5db85542598d95805442454a4b to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/18825f5db85542598d95805442454a4b 2024-12-06T08:21:36,990 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ef6a782a961645528ce1ecb145c60226 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/B/ef6a782a961645528ce1ecb145c60226 2024-12-06T08:21:36,992 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b82d35c9a5d4156a6c216de082c7b07 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/0b82d35c9a5d4156a6c216de082c7b07 2024-12-06T08:21:36,993 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/929d46491df94f528871059d07340837 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/929d46491df94f528871059d07340837 2024-12-06T08:21:36,994 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac64868d23f049e59c7164293996f6e8 to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/C/ac64868d23f049e59c7164293996f6e8 2024-12-06T08:21:36,995 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/recovered.edits/382.seqid to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c/recovered.edits/382.seqid 2024-12-06T08:21:36,996 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/default/TestAcidGuarantees/4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:36,996 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-06T08:21:36,996 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T08:21:36,997 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-06T08:21:36,999 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120600453ad2a932499b9e5a13b4a3187e49_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120600453ad2a932499b9e5a13b4a3187e49_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,000 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120604717e596ef84543a01c4db8e6a57fe1_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120604717e596ef84543a01c4db8e6a57fe1_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,000 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062bd8b3f0d55046a798d9208adeb541dc_4407da04b0ac162131052d3fbc4bef2c to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412062bd8b3f0d55046a798d9208adeb541dc_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,001 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063deed36ad1604a68a8774221ee60139f_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412063deed36ad1604a68a8774221ee60139f_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,002 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120643c7e09b753c4631953bd75f782d6350_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120643c7e09b753c4631953bd75f782d6350_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,003 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120645f7468886824ca2a2904d36182a4d5e_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120645f7468886824ca2a2904d36182a4d5e_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,004 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120653f854422e46466c8cda4b62720a8949_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120653f854422e46466c8cda4b62720a8949_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,004 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065bf163a363bc43e2bc64c07c8ab5ee69_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412065bf163a363bc43e2bc64c07c8ab5ee69_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,005 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120675aba228b1ee426598ce0f3a2b0b6485_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120675aba228b1ee426598ce0f3a2b0b6485_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,006 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067cd0c7cc584d4e61870ee26f76863790_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412067cd0c7cc584d4e61870ee26f76863790_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,007 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120681874dd0739445eba47c734e58b0520d_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120681874dd0739445eba47c734e58b0520d_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,007 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068ce9e77064d44f958da5bdbcc8e2c01a_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412068ce9e77064d44f958da5bdbcc8e2c01a_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,008 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069f2321e93cb64948b2e8799cfdb0d6fe_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412069f2321e93cb64948b2e8799cfdb0d6fe_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,009 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b050d0c2e9b543e7935cf97f4209ad7e_4407da04b0ac162131052d3fbc4bef2c to 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206b050d0c2e9b543e7935cf97f4209ad7e_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,010 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c9fbb30599f04d4497d6ca6806127b73_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206c9fbb30599f04d4497d6ca6806127b73_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,011 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206cf4d8e0451d84c8083061cb6cbd0627d_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206cf4d8e0451d84c8083061cb6cbd0627d_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,011 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d439b57ee9fd4020894362c87e637698_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206d439b57ee9fd4020894362c87e637698_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,012 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206dd1aa6d608644fc9af5e5359808945ce_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206dd1aa6d608644fc9af5e5359808945ce_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,013 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e37ad8e2d7a24d0a921b9c1a4576fdf0_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e37ad8e2d7a24d0a921b9c1a4576fdf0_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,014 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e69c1978c6ec4639bdfbc66b04b8ef7a_4407da04b0ac162131052d3fbc4bef2c to hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241206e69c1978c6ec4639bdfbc66b04b8ef7a_4407da04b0ac162131052d3fbc4bef2c 2024-12-06T08:21:37,014 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-06T08:21:37,016 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:37,018 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-06T08:21:37,019 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-06T08:21:37,020 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:37,020 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-06T08:21:37,020 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733473297020"}]},"ts":"9223372036854775807"} 2024-12-06T08:21:37,021 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-06T08:21:37,021 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4407da04b0ac162131052d3fbc4bef2c, NAME => 'TestAcidGuarantees,,1733473262946.4407da04b0ac162131052d3fbc4bef2c.', STARTKEY => '', ENDKEY => ''}] 2024-12-06T08:21:37,022 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-06T08:21:37,022 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733473297022"}]},"ts":"9223372036854775807"} 2024-12-06T08:21:37,023 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-06T08:21:37,025 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-06T08:21:37,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 46 msec 2024-12-06T08:21:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42303 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-06T08:21:37,081 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-06T08:21:37,091 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=236 (was 239), OpenFileDescriptor=451 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=369 (was 432), ProcessCount=9 (was 9), AvailableMemoryMB=8445 (was 8464) 2024-12-06T08:21:37,091 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T08:21:37,091 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:21:37,091 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3771e354 to 127.0.0.1:65195 2024-12-06T08:21:37,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:37,091 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:21:37,091 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1173057206, stopped=false 2024-12-06T08:21:37,092 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,42303,1733473110672 2024-12-06T08:21:37,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:21:37,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:21:37,093 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:21:37,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:21:37,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:21:37,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:37,094 INFO 
[Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,38041,1733473111442' ***** 2024-12-06T08:21:37,094 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:21:37,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:21:37,094 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:21:37,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:21:37,094 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:21:37,094 INFO [RS:0;b6b797fc3981:38041 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:21:37,094 INFO [RS:0;b6b797fc3981:38041 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(3579): Received CLOSE for ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,38041,1733473111442 2024-12-06T08:21:37,095 DEBUG [RS:0;b6b797fc3981:38041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:21:37,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ad6c9f95f0b663a6596ce60f0a457f00, disabling compactions & flushes 2024-12-06T08:21:37,095 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:21:37,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:21:37,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. after waiting 0 ms 2024-12-06T08:21:37,095 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-06T08:21:37,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 
2024-12-06T08:21:37,096 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, ad6c9f95f0b663a6596ce60f0a457f00=hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00.} 2024-12-06T08:21:37,096 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ad6c9f95f0b663a6596ce60f0a457f00 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:21:37,096 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:21:37,096 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:21:37,096 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:21:37,096 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:21:37,096 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:21:37,096 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-06T08:21:37,099 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:21:37,113 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/.tmp/info/b742990305494ba6983888be52bdb0bb is 45, key is default/info:d/1733473115585/Put/seqid=0 2024-12-06T08:21:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742487_1663 (size=5037) 2024-12-06T08:21:37,123 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/info/2181b1f01e8e40bc9aa74725e3912324 is 143, key is hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00./info:regioninfo/1733473115467/Put/seqid=0 2024-12-06T08:21:37,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742488_1664 (size=7725) 2024-12-06T08:21:37,168 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:21:37,299 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:21:37,499 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ad6c9f95f0b663a6596ce60f0a457f00 2024-12-06T08:21:37,517 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/.tmp/info/b742990305494ba6983888be52bdb0bb 2024-12-06T08:21:37,520 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/.tmp/info/b742990305494ba6983888be52bdb0bb as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/info/b742990305494ba6983888be52bdb0bb 2024-12-06T08:21:37,522 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/info/b742990305494ba6983888be52bdb0bb, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T08:21:37,523 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for ad6c9f95f0b663a6596ce60f0a457f00 in 427ms, sequenceid=6, compaction requested=false 2024-12-06T08:21:37,526 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/namespace/ad6c9f95f0b663a6596ce60f0a457f00/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T08:21:37,526 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/info/2181b1f01e8e40bc9aa74725e3912324 2024-12-06T08:21:37,526 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 2024-12-06T08:21:37,526 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ad6c9f95f0b663a6596ce60f0a457f00: 2024-12-06T08:21:37,526 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733473114636.ad6c9f95f0b663a6596ce60f0a457f00. 
2024-12-06T08:21:37,543 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/rep_barrier/60b88e41f6104b0092d7327c931260b9 is 102, key is TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad./rep_barrier:/1733473144562/DeleteFamily/seqid=0 2024-12-06T08:21:37,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742489_1665 (size=6025) 2024-12-06T08:21:37,700 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T08:21:37,736 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T08:21:37,736 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T08:21:37,900 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T08:21:37,947 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/rep_barrier/60b88e41f6104b0092d7327c931260b9 2024-12-06T08:21:37,965 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/table/88cb919f93e74202b1c58b5128b3afd0 is 96, key is TestAcidGuarantees,,1733473115776.27b39638a4e82980fa51c6694c44d0ad./table:/1733473144562/DeleteFamily/seqid=0 2024-12-06T08:21:37,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742490_1666 (size=5942) 2024-12-06T08:21:38,100 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-06T08:21:38,100 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-06T08:21:38,100 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T08:21:38,300 DEBUG [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-06T08:21:38,369 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/table/88cb919f93e74202b1c58b5128b3afd0 2024-12-06T08:21:38,372 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/info/2181b1f01e8e40bc9aa74725e3912324 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/info/2181b1f01e8e40bc9aa74725e3912324 2024-12-06T08:21:38,374 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/info/2181b1f01e8e40bc9aa74725e3912324, entries=22, sequenceid=93, filesize=7.5 K 2024-12-06T08:21:38,375 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/rep_barrier/60b88e41f6104b0092d7327c931260b9 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/rep_barrier/60b88e41f6104b0092d7327c931260b9 2024-12-06T08:21:38,377 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/rep_barrier/60b88e41f6104b0092d7327c931260b9, entries=6, sequenceid=93, filesize=5.9 K 2024-12-06T08:21:38,377 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/.tmp/table/88cb919f93e74202b1c58b5128b3afd0 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/table/88cb919f93e74202b1c58b5128b3afd0 2024-12-06T08:21:38,379 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/table/88cb919f93e74202b1c58b5128b3afd0, entries=9, sequenceid=93, filesize=5.8 K 2024-12-06T08:21:38,380 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1284ms, sequenceid=93, compaction requested=false 2024-12-06T08:21:38,383 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-06T08:21:38,384 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:21:38,384 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:21:38,384 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:21:38,384 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:21:38,500 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,38041,1733473111442; all regions closed. 
2024-12-06T08:21:38,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741834_1010 (size=26050)
2024-12-06T08:21:38,506 DEBUG [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/oldWALs
2024-12-06T08:21:38,506 INFO [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL b6b797fc3981%2C38041%2C1733473111442.meta:.meta(num 1733473114386)
2024-12-06T08:21:38,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741833_1009 (size=15023455)
2024-12-06T08:21:38,509 DEBUG [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/oldWALs
2024-12-06T08:21:38,509 INFO [RS:0;b6b797fc3981:38041 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL b6b797fc3981%2C38041%2C1733473111442:(num 1733473113820)
2024-12-06T08:21:38,509 DEBUG [RS:0;b6b797fc3981:38041 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T08:21:38,510 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.LeaseManager(133): Closed leases
2024-12-06T08:21:38,510 INFO [RS:0;b6b797fc3981:38041 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-06T08:21:38,510 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-06T08:21:38,510 INFO [RS:0;b6b797fc3981:38041 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38041
2024-12-06T08:21:38,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-06T08:21:38,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,38041,1733473111442
2024-12-06T08:21:38,515 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,38041,1733473111442]
2024-12-06T08:21:38,516 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,38041,1733473111442; numProcessing=1
2024-12-06T08:21:38,517 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,38041,1733473111442 already deleted, retry=false
2024-12-06T08:21:38,517 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,38041,1733473111442 expired; onlineServers=0
2024-12-06T08:21:38,517 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,42303,1733473110672' *****
2024-12-06T08:21:38,517 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-06T08:21:38,517 DEBUG [M:0;b6b797fc3981:42303 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d9287db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0
2024-12-06T08:21:38,517 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,42303,1733473110672
2024-12-06T08:21:38,517 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,42303,1733473110672; all regions closed.
2024-12-06T08:21:38,517 DEBUG [M:0;b6b797fc3981:42303 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T08:21:38,517 DEBUG [M:0;b6b797fc3981:42303 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-06T08:21:38,517 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-06T08:21:38,517 DEBUG [M:0;b6b797fc3981:42303 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-06T08:21:38,517 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733473113552 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733473113552,5,FailOnTimeoutGroup]
2024-12-06T08:21:38,517 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733473113550 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733473113550,5,FailOnTimeoutGroup]
2024-12-06T08:21:38,518 INFO [M:0;b6b797fc3981:42303 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown
2024-12-06T08:21:38,518 DEBUG [M:0;b6b797fc3981:42303 {}] master.HMaster(1733): Stopping service threads
2024-12-06T08:21:38,518 INFO [M:0;b6b797fc3981:42303 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-06T08:21:38,518 ERROR [M:0;b6b797fc3981:42303 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-12-06T08:21:38,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-06T08:21:38,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-06T08:21:38,519 INFO [M:0;b6b797fc3981:42303 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-06T08:21:38,519 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-06T08:21:38,519 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-06T08:21:38,519 DEBUG [M:0;b6b797fc3981:42303 {}] zookeeper.ZKUtil(347): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-06T08:21:38,519 WARN [M:0;b6b797fc3981:42303 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-06T08:21:38,519 INFO [M:0;b6b797fc3981:42303 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-06T08:21:38,519 INFO [M:0;b6b797fc3981:42303 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-06T08:21:38,519 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-06T08:21:38,519 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-06T08:21:38,519 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-06T08:21:38,519 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-06T08:21:38,519 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-06T08:21:38,519 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=785.56 KB heapSize=967.20 KB
2024-12-06T08:21:38,542 DEBUG [M:0;b6b797fc3981:42303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c435dea3ca944d6586195630cc118ce4 is 82, key is hbase:meta,,1/info:regioninfo/1733473114525/Put/seqid=0
2024-12-06T08:21:38,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742491_1667 (size=5672)
2024-12-06T08:21:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T08:21:38,616 INFO [RS:0;b6b797fc3981:38041 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,38041,1733473111442; zookeeper connection closed.
2024-12-06T08:21:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38041-0x100666840130001, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T08:21:38,616 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3d2f3ca0 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3d2f3ca0
2024-12-06T08:21:38,617 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-06T08:21:38,946 INFO [M:0;b6b797fc3981:42303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2241 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c435dea3ca944d6586195630cc118ce4
2024-12-06T08:21:38,966 DEBUG [M:0;b6b797fc3981:42303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9cbebf024ee4620ba7c09bf5b8d7dda is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x9E/proc:d/1733473265963/Put/seqid=0
2024-12-06T08:21:38,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742492_1668 (size=45750)
2024-12-06T08:21:39,370 INFO [M:0;b6b797fc3981:42303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=785.00 KB at sequenceid=2241 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9cbebf024ee4620ba7c09bf5b8d7dda
2024-12-06T08:21:39,373 INFO [M:0;b6b797fc3981:42303 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f9cbebf024ee4620ba7c09bf5b8d7dda
2024-12-06T08:21:39,388 DEBUG [M:0;b6b797fc3981:42303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f987bb141be94760ad7543a97823e1b3 is 69, key is b6b797fc3981,38041,1733473111442/rs:state/1733473113582/Put/seqid=0
2024-12-06T08:21:39,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073742493_1669 (size=5156)
2024-12-06T08:21:39,791 INFO [M:0;b6b797fc3981:42303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2241 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f987bb141be94760ad7543a97823e1b3
2024-12-06T08:21:39,794 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c435dea3ca944d6586195630cc118ce4 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c435dea3ca944d6586195630cc118ce4
2024-12-06T08:21:39,797 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c435dea3ca944d6586195630cc118ce4, entries=8, sequenceid=2241, filesize=5.5 K
2024-12-06T08:21:39,797 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9cbebf024ee4620ba7c09bf5b8d7dda as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9cbebf024ee4620ba7c09bf5b8d7dda
2024-12-06T08:21:39,800 INFO [M:0;b6b797fc3981:42303 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f9cbebf024ee4620ba7c09bf5b8d7dda
2024-12-06T08:21:39,800 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9cbebf024ee4620ba7c09bf5b8d7dda, entries=183, sequenceid=2241, filesize=44.7 K
2024-12-06T08:21:39,800 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f987bb141be94760ad7543a97823e1b3 as hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f987bb141be94760ad7543a97823e1b3
2024-12-06T08:21:39,802 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/c6bcd02f-3ff7-c480-ced2-bd9437445156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f987bb141be94760ad7543a97823e1b3, entries=1, sequenceid=2241, filesize=5.0 K
2024-12-06T08:21:39,803 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(3040): Finished flush of dataSize ~785.56 KB/804410, heapSize ~966.90 KB/990104, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1284ms, sequenceid=2241, compaction requested=false
2024-12-06T08:21:39,804 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-06T08:21:39,804 DEBUG [M:0;b6b797fc3981:42303 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-06T08:21:39,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40801 is added to blk_1073741830_1006 (size=951330)
2024-12-06T08:21:39,806 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-06T08:21:39,807 INFO [M:0;b6b797fc3981:42303 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-06T08:21:39,807 INFO [M:0;b6b797fc3981:42303 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42303
2024-12-06T08:21:39,808 DEBUG [M:0;b6b797fc3981:42303 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,42303,1733473110672 already deleted, retry=false
2024-12-06T08:21:39,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T08:21:39,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42303-0x100666840130000, quorum=127.0.0.1:65195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T08:21:39,910 INFO [M:0;b6b797fc3981:42303 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,42303,1733473110672; zookeeper connection closed.
2024-12-06T08:21:39,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10ba49e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-06T08:21:39,917 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-06T08:21:39,917 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-06T08:21:39,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-06T08:21:39,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/hadoop.log.dir/,STOPPED}
2024-12-06T08:21:39,920 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-06T08:21:39,920 WARN [BP-131426492-172.17.0.2-1733473107734 heartbeating to localhost/127.0.0.1:43731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-06T08:21:39,920 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-06T08:21:39,920 WARN [BP-131426492-172.17.0.2-1733473107734 heartbeating to localhost/127.0.0.1:43731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-131426492-172.17.0.2-1733473107734 (Datanode Uuid 1550839e-efe6-40a4-bd97-ee939e8dac26) service to localhost/127.0.0.1:43731
2024-12-06T08:21:39,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/dfs/data/data1/current/BP-131426492-172.17.0.2-1733473107734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-06T08:21:39,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/cluster_82222ce2-4af9-3ff1-6acd-1ed38eead44b/dfs/data/data2/current/BP-131426492-172.17.0.2-1733473107734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-06T08:21:39,923 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-06T08:21:39,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-06T08:21:39,931 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-06T08:21:39,931 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-06T08:21:39,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-06T08:21:39,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ad83943-005e-4825-4bc1-ad15b96f4492/hadoop.log.dir/,STOPPED}
2024-12-06T08:21:39,948 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-06T08:21:40,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down